Diffstat
-rw-r--r--  mlir/bindings/python/pybind.cpp  2
-rw-r--r--  mlir/examples/toy/Ch2/include/toy/Ops.td  8
-rw-r--r--  mlir/examples/toy/Ch2/mlir/Dialect.cpp  9
-rw-r--r--  mlir/examples/toy/Ch2/mlir/MLIRGen.cpp  41
-rw-r--r--  mlir/examples/toy/Ch3/include/toy/Ops.td  8
-rw-r--r--  mlir/examples/toy/Ch3/mlir/Dialect.cpp  9
-rw-r--r--  mlir/examples/toy/Ch3/mlir/MLIRGen.cpp  41
-rw-r--r--  mlir/examples/toy/Ch3/mlir/ToyCombine.cpp  2
-rw-r--r--  mlir/examples/toy/Ch4/include/toy/Ops.td  8
-rw-r--r--  mlir/examples/toy/Ch4/mlir/Dialect.cpp  13
-rw-r--r--  mlir/examples/toy/Ch4/mlir/MLIRGen.cpp  41
-rw-r--r--  mlir/examples/toy/Ch4/mlir/ToyCombine.cpp  2
-rw-r--r--  mlir/examples/toy/Ch5/include/toy/Ops.td  8
-rw-r--r--  mlir/examples/toy/Ch5/mlir/Dialect.cpp  13
-rw-r--r--  mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp  36
-rw-r--r--  mlir/examples/toy/Ch5/mlir/MLIRGen.cpp  41
-rw-r--r--  mlir/examples/toy/Ch5/mlir/ToyCombine.cpp  2
-rw-r--r--  mlir/examples/toy/Ch6/include/toy/Ops.td  8
-rw-r--r--  mlir/examples/toy/Ch6/mlir/Dialect.cpp  13
-rw-r--r--  mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp  36
-rw-r--r--  mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp  24
-rw-r--r--  mlir/examples/toy/Ch6/mlir/MLIRGen.cpp  41
-rw-r--r--  mlir/examples/toy/Ch6/mlir/ToyCombine.cpp  2
-rw-r--r--  mlir/examples/toy/Ch7/include/toy/Ops.td  10
-rw-r--r--  mlir/examples/toy/Ch7/mlir/Dialect.cpp  15
-rw-r--r--  mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp  36
-rw-r--r--  mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp  24
-rw-r--r--  mlir/examples/toy/Ch7/mlir/MLIRGen.cpp  40
-rw-r--r--  mlir/examples/toy/Ch7/mlir/ToyCombine.cpp  2
-rw-r--r--  mlir/g3doc/DeclarativeRewrites.md  6
-rw-r--r--  mlir/g3doc/DialectConversion.md  6
-rw-r--r--  mlir/g3doc/EDSC.md  8
-rw-r--r--  mlir/g3doc/GenericDAGRewriter.md  2
-rw-r--r--  mlir/g3doc/OpDefinitions.md  14
-rw-r--r--  mlir/g3doc/QuickstartRewrites.md  4
-rw-r--r--  mlir/g3doc/Rationale.md  2
-rw-r--r--  mlir/g3doc/Tutorials/Toy/Ch-3.md  2
-rw-r--r--  mlir/g3doc/Tutorials/Toy/Ch-4.md  4
-rw-r--r--  mlir/g3doc/Tutorials/Toy/Ch-5.md  10
-rw-r--r--  mlir/g3doc/UsageOfConst.md  8
-rw-r--r--  mlir/include/mlir/Analysis/AffineAnalysis.h  9
-rw-r--r--  mlir/include/mlir/Analysis/AffineStructures.h  72
-rw-r--r--  mlir/include/mlir/Analysis/CallInterfaces.h  4
-rw-r--r--  mlir/include/mlir/Analysis/Dominance.h  4
-rw-r--r--  mlir/include/mlir/Analysis/Liveness.h  17
-rw-r--r--  mlir/include/mlir/Analysis/LoopAnalysis.h  9
-rw-r--r--  mlir/include/mlir/Analysis/Utils.h  10
-rw-r--r--  mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h  13
-rw-r--r--  mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h  7
-rw-r--r--  mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h  57
-rw-r--r--  mlir/include/mlir/Dialect/AffineOps/AffineOps.h  105
-rw-r--r--  mlir/include/mlir/Dialect/AffineOps/AffineOps.td  8
-rw-r--r--  mlir/include/mlir/Dialect/GPU/GPUDialect.h  6
-rw-r--r--  mlir/include/mlir/Dialect/GPU/GPUOps.td  16
-rw-r--r--  mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h  6
-rw-r--r--  mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td  22
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h  16
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h  20
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td  10
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td  16
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td  10
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h  8
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td  2
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h  4
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/Utils/Utils.h  36
-rw-r--r--  mlir/include/mlir/Dialect/LoopOps/LoopOps.h  2
-rw-r--r--  mlir/include/mlir/Dialect/LoopOps/LoopOps.td  12
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td  2
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td  2
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td  4
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h  4
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td  6
-rw-r--r--  mlir/include/mlir/Dialect/StandardOps/Ops.h  35
-rw-r--r--  mlir/include/mlir/Dialect/StandardOps/Ops.td  78
-rw-r--r--  mlir/include/mlir/Dialect/VectorOps/Utils.h  5
-rw-r--r--  mlir/include/mlir/Dialect/VectorOps/VectorOps.td  22
-rw-r--r--  mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h  5
-rw-r--r--  mlir/include/mlir/EDSC/Builders.h  32
-rw-r--r--  mlir/include/mlir/EDSC/Helpers.h  10
-rw-r--r--  mlir/include/mlir/EDSC/Intrinsics.h  26
-rw-r--r--  mlir/include/mlir/IR/Block.h  8
-rw-r--r--  mlir/include/mlir/IR/BlockAndValueMapping.h  8
-rw-r--r--  mlir/include/mlir/IR/Builders.h  10
-rw-r--r--  mlir/include/mlir/IR/FunctionSupport.h  2
-rw-r--r--  mlir/include/mlir/IR/Matchers.h  14
-rw-r--r--  mlir/include/mlir/IR/OpDefinition.h  40
-rw-r--r--  mlir/include/mlir/IR/OpImplementation.h  30
-rw-r--r--  mlir/include/mlir/IR/Operation.h  22
-rw-r--r--  mlir/include/mlir/IR/OperationSupport.h  45
-rw-r--r--  mlir/include/mlir/IR/TypeUtilities.h  12
-rw-r--r--  mlir/include/mlir/IR/Value.h  22
-rw-r--r--  mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h  10
-rw-r--r--  mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h  2
-rw-r--r--  mlir/include/mlir/Transforms/DialectConversion.h  46
-rw-r--r--  mlir/include/mlir/Transforms/FoldUtils.h  10
-rw-r--r--  mlir/include/mlir/Transforms/InliningUtils.h  14
-rw-r--r--  mlir/include/mlir/Transforms/LoopLikeInterface.td  2
-rw-r--r--  mlir/include/mlir/Transforms/LoopUtils.h  12
-rw-r--r--  mlir/include/mlir/Transforms/RegionUtils.h  8
-rw-r--r--  mlir/include/mlir/Transforms/Utils.h  20
-rw-r--r--  mlir/lib/Analysis/AffineAnalysis.cpp  60
-rw-r--r--  mlir/lib/Analysis/AffineStructures.cpp  94
-rw-r--r--  mlir/lib/Analysis/CallGraph.cpp  2
-rw-r--r--  mlir/lib/Analysis/Dominance.cpp  2
-rw-r--r--  mlir/lib/Analysis/Liveness.cpp  34
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp  30
-rw-r--r--  mlir/lib/Analysis/SliceAnalysis.cpp  4
-rw-r--r--  mlir/lib/Analysis/Utils.cpp  42
-rw-r--r--  mlir/lib/Analysis/VectorAnalysis.cpp  4
-rw-r--r--  mlir/lib/Analysis/Verifier.cpp  6
-rw-r--r--  mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp  139
-rw-r--r--  mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h  4
-rw-r--r--  mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h  6
-rw-r--r--  mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp  46
-rw-r--r--  mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp  217
-rw-r--r--  mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp  30
-rw-r--r--  mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp  68
-rw-r--r--  mlir/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp  18
-rw-r--r--  mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp  111
-rw-r--r--  mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp  2
-rw-r--r--  mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp  301
-rw-r--r--  mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp  37
-rw-r--r--  mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp  4
-rw-r--r--  mlir/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp  8
-rw-r--r--  mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp  108
-rw-r--r--  mlir/lib/Dialect/AffineOps/AffineOps.cpp  147
-rw-r--r--  mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp  64
-rw-r--r--  mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h  6
-rw-r--r--  mlir/lib/Dialect/GPU/IR/GPUDialect.cpp  47
-rw-r--r--  mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp  12
-rw-r--r--  mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp  26
-rw-r--r--  mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp  20
-rw-r--r--  mlir/lib/Dialect/Linalg/EDSC/Builders.cpp  18
-rw-r--r--  mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp  4
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp  32
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp  44
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp  6
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp  35
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp  53
-rw-r--r--  mlir/lib/Dialect/Linalg/Utils/Utils.cpp  24
-rw-r--r--  mlir/lib/Dialect/LoopOps/LoopOps.cpp  12
-rw-r--r--  mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp  2
-rw-r--r--  mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp  8
-rw-r--r--  mlir/lib/Dialect/SPIRV/SPIRVOps.cpp  39
-rw-r--r--  mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp  38
-rw-r--r--  mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp  18
-rw-r--r--  mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp  6
-rw-r--r--  mlir/lib/Dialect/StandardOps/Ops.cpp  66
-rw-r--r--  mlir/lib/Dialect/VectorOps/VectorOps.cpp  30
-rw-r--r--  mlir/lib/Dialect/VectorOps/VectorTransforms.cpp  76
-rw-r--r--  mlir/lib/EDSC/Builders.cpp  23
-rw-r--r--  mlir/lib/EDSC/Helpers.cpp  6
-rw-r--r--  mlir/lib/EDSC/Intrinsics.cpp  12
-rw-r--r--  mlir/lib/IR/AsmPrinter.cpp  50
-rw-r--r--  mlir/lib/IR/Block.cpp  4
-rw-r--r--  mlir/lib/IR/Builders.cpp  4
-rw-r--r--  mlir/lib/IR/Operation.cpp  26
-rw-r--r--  mlir/lib/IR/OperationSupport.cpp  13
-rw-r--r--  mlir/lib/IR/Region.cpp  6
-rw-r--r--  mlir/lib/IR/TypeUtilities.cpp  12
-rw-r--r--  mlir/lib/IR/Value.cpp  4
-rw-r--r--  mlir/lib/Parser/Parser.cpp  65
-rw-r--r--  mlir/lib/Pass/IRPrinting.cpp  4
-rw-r--r--  mlir/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp  2
-rw-r--r--  mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp  2
-rw-r--r--  mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp  14
-rw-r--r--  mlir/lib/TableGen/Pattern.cpp  2
-rw-r--r--  mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp  38
-rw-r--r--  mlir/lib/Target/LLVMIR/ModuleTranslation.cpp  10
-rw-r--r--  mlir/lib/Transforms/AffineDataCopyGeneration.cpp  2
-rw-r--r--  mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp  21
-rw-r--r--  mlir/lib/Transforms/DialectConversion.cpp  58
-rw-r--r--  mlir/lib/Transforms/LoopFusion.cpp  93
-rw-r--r--  mlir/lib/Transforms/LoopInvariantCodeMotion.cpp  4
-rw-r--r--  mlir/lib/Transforms/LoopTiling.cpp  11
-rw-r--r--  mlir/lib/Transforms/LoopUnrollAndJam.cpp  4
-rw-r--r--  mlir/lib/Transforms/MemRefDataFlowOpt.cpp  6
-rw-r--r--  mlir/lib/Transforms/PipelineDataTransfer.cpp  14
-rw-r--r--  mlir/lib/Transforms/Utils/FoldUtils.cpp  8
-rw-r--r--  mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp  8
-rw-r--r--  mlir/lib/Transforms/Utils/InliningUtils.cpp  36
-rw-r--r--  mlir/lib/Transforms/Utils/LoopFusionUtils.cpp  16
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp  169
-rw-r--r--  mlir/lib/Transforms/Utils/RegionUtils.cpp  24
-rw-r--r--  mlir/lib/Transforms/Utils/Utils.cpp  57
-rw-r--r--  mlir/lib/Transforms/Vectorize.cpp  40
-rw-r--r--  mlir/test/EDSC/builder-api-test.cpp  4
-rw-r--r--  mlir/test/lib/TestDialect/TestDialect.cpp  8
-rw-r--r--  mlir/test/lib/TestDialect/TestOps.td  2
-rw-r--r--  mlir/test/lib/TestDialect/TestPatterns.cpp  33
-rw-r--r--  mlir/test/lib/Transforms/TestLoopMapping.cpp  2
-rw-r--r--  mlir/test/lib/Transforms/TestVectorizationUtils.cpp  2
-rw-r--r--  mlir/test/mlir-tblgen/op-attribute.td  6
-rw-r--r--  mlir/test/mlir-tblgen/op-decl.td  24
-rw-r--r--  mlir/test/mlir-tblgen/op-operand.td  10
-rw-r--r--  mlir/test/mlir-tblgen/op-result.td  6
-rw-r--r--  mlir/test/mlir-tblgen/predicate.td  4
-rw-r--r--  mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp  27
-rw-r--r--  mlir/tools/mlir-tblgen/RewriterGen.cpp  20
-rw-r--r--  mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp  2
-rw-r--r--  mlir/unittests/IR/OperationSupportTest.cpp  8
201 files changed, 2493 insertions(+), 2413 deletions(-)
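Nearly every hunk below makes the same mechanical substitution: a spelled-out `Value *` (and the corresponding `ArrayRef<Value *>`, `SmallVector<Value *, N>`, `std::vector<Value *>`, ...) becomes the `ValuePtr` alias, so that `Value` can later be turned into a value type without revisiting every call site again. The alias itself lives in mlir/include/mlir/IR/Value.h (touched in the diffstat above but not shown in this excerpt); a minimal sketch of what it presumably looks like at this stage of the transition, not the verbatim upstream declaration:

    // Hypothetical reconstruction of the transitional alias; the real
    // declaration is in mlir/include/mlir/IR/Value.h.
    namespace mlir {
    class Value;
    using ValuePtr = Value *; // later re-pointed at a value-typed Value
    } // namespace mlir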
diff --git a/mlir/bindings/python/pybind.cpp b/mlir/bindings/python/pybind.cpp
index 825f800c0bd..54646cbe800 100644
--- a/mlir/bindings/python/pybind.cpp
+++ b/mlir/bindings/python/pybind.cpp
@@ -103,7 +103,7 @@ struct PythonValueHandle {
assert(value.hasType() && value.getType().isa<FunctionType>() &&
"can only call function-typed values");
- std::vector<Value *> argValues;
+ std::vector<ValuePtr> argValues;
argValues.reserve(args.size());
for (auto arg : args)
argValues.push_back(arg.value.getValue());
diff --git a/mlir/examples/toy/Ch2/include/toy/Ops.td b/mlir/examples/toy/Ch2/include/toy/Ops.td
index f7c011915ff..dd88b097ab1 100644
--- a/mlir/examples/toy/Ch2/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch2/include/toy/Ops.td
@@ -98,7 +98,7 @@ def AddOp : Toy_Op<"add"> {
// Allow building an AddOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -129,7 +129,7 @@ def GenericCallOp : Toy_Op<"generic_call"> {
// Add custom build methods for the generic call operation.
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "StringRef callee, ArrayRef<Value *> arguments">
+ "StringRef callee, ArrayRef<ValuePtr> arguments">
];
}
@@ -145,7 +145,7 @@ def MulOp : Toy_Op<"mul"> {
// Allow building a MulOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -219,7 +219,7 @@ def TransposeOp : Toy_Op<"transpose"> {
// Allow building a TransposeOp with from the input operand.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input">
];
// Invoke a static verify method to verify this transpose operation.
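The reason the .td files are touched at all: the quoted string inside each ODS `OpBuilder<"...">` entry is spliced verbatim into the generated Ops.h.inc as the parameter list of a static build() overload, so it has to spell ValuePtr exactly as the hand-written definitions in Dialect.cpp do. A sketch of the declaration generated for the AddOp entry above (assumed shape, not verbatim mlir-tblgen output; the real generated class also derives from mlir::Op<...>, which is omitted here):

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/OperationSupport.h"
    #include "mlir/IR/Value.h"

    namespace mlir {
    namespace toy {
    class AddOp /* : public Op<AddOp, ...> in the real generated code */ {
    public:
      // The string from the 'builders' list becomes this parameter list.
      static void build(Builder *b, OperationState &state, ValuePtr lhs,
                        ValuePtr rhs);
    };
    } // namespace toy
    } // namespace mlir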
diff --git a/mlir/examples/toy/Ch2/mlir/Dialect.cpp b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
index 86f648dbe0e..4a3232dabe3 100644
--- a/mlir/examples/toy/Ch2/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
@@ -94,7 +94,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
// AddOp
void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -103,7 +103,8 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
// GenericCallOp
void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
- StringRef callee, ArrayRef<mlir::Value *> arguments) {
+ StringRef callee,
+ ArrayRef<mlir::ValuePtr> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
@@ -114,7 +115,7 @@ void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
// MulOp
void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -161,7 +162,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
// TransposeOp
void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *value) {
+ mlir::ValuePtr value) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}
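With the build() methods above taking ValuePtr, call sites pass their operands straight through. A minimal usage sketch, assuming the tutorial's toy/Dialect.h header and that the ops live in namespace mlir::toy; single-result toy ops convert implicitly to ValuePtr, as the MLIRGen.cpp hunks below rely on:

    #include "mlir/IR/Builders.h"
    #include "toy/Dialect.h"

    // Emit lhs + rhs through the ValuePtr-based AddOp builder defined above.
    static mlir::ValuePtr emitAdd(mlir::OpBuilder &builder, mlir::Location loc,
                                  mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
      return builder.create<mlir::toy::AddOp>(loc, lhs, rhs);
    }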
diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
index da474e809b3..902c634a954 100644
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -99,7 +99,7 @@ private:
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated, the
/// scope is destroyed and the mappings created in this scope are dropped.
- llvm::ScopedHashTable<StringRef, mlir::Value *> symbolTable;
+ llvm::ScopedHashTable<StringRef, mlir::ValuePtr> symbolTable;
/// Helper conversion for a Toy AST location to an MLIR location.
mlir::Location loc(Location loc) {
@@ -109,7 +109,7 @@ private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
- mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) {
+ mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) {
if (symbolTable.count(var))
return mlir::failure();
symbolTable.insert(var, value);
@@ -132,7 +132,8 @@ private:
/// Emit a new function and add it to the MLIR module.
mlir::FuncOp mlirGen(FunctionAST &funcAST) {
// Create a scope in the symbol table to hold variable declarations.
- ScopedHashTableScope<llvm::StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<llvm::StringRef, mlir::ValuePtr> var_scope(
+ symbolTable);
// Create an MLIR function for the given prototype.
mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -183,7 +184,7 @@ private:
}
/// Emit a binary operation
- mlir::Value *mlirGen(BinaryExprAST &binop) {
+ mlir::ValuePtr mlirGen(BinaryExprAST &binop) {
// First emit the operations for each side of the operation before emitting
// the operation itself. For example if the expression is `a + foo(a)`
// 1) First it will visiting the LHS, which will return a reference to the
@@ -195,10 +196,10 @@ private:
// and the result value is returned. If an error occurs we get a nullptr
// and propagate.
//
- mlir::Value *lhs = mlirGen(*binop.getLHS());
+ mlir::ValuePtr lhs = mlirGen(*binop.getLHS());
if (!lhs)
return nullptr;
- mlir::Value *rhs = mlirGen(*binop.getRHS());
+ mlir::ValuePtr rhs = mlirGen(*binop.getRHS());
if (!rhs)
return nullptr;
auto location = loc(binop.loc());
@@ -219,8 +220,8 @@ private:
/// This is a reference to a variable in an expression. The variable is
/// expected to have been declared and so should have a value in the symbol
/// table, otherwise emit an error and return nullptr.
- mlir::Value *mlirGen(VariableExprAST &expr) {
- if (auto *variable = symbolTable.lookup(expr.getName()))
+ mlir::ValuePtr mlirGen(VariableExprAST &expr) {
+ if (auto variable = symbolTable.lookup(expr.getName()))
return variable;
emitError(loc(expr.loc()), "error: unknown variable '")
@@ -233,7 +234,7 @@ private:
auto location = loc(ret.loc());
// 'return' takes an optional expression, handle that case here.
- mlir::Value *expr = nullptr;
+ mlir::ValuePtr expr = nullptr;
if (ret.getExpr().hasValue()) {
if (!(expr = mlirGen(*ret.getExpr().getValue())))
return mlir::failure();
@@ -241,7 +242,7 @@ private:
// Otherwise, this return operation has zero operands.
builder.create<ReturnOp>(location, expr ? makeArrayRef(expr)
- : ArrayRef<mlir::Value *>());
+ : ArrayRef<mlir::ValuePtr>());
return mlir::success();
}
@@ -263,7 +264,7 @@ private:
/// [[1.000000e+00, 2.000000e+00, 3.000000e+00],
/// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64>
///
- mlir::Value *mlirGen(LiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(LiteralExprAST &lit) {
auto type = getType(lit.getDims());
// The attribute is a vector with a floating point value per element
@@ -309,14 +310,14 @@ private:
/// Emit a call expression. It emits specific operations for the `transpose`
/// builtin. Other identifiers are assumed to be user-defined functions.
- mlir::Value *mlirGen(CallExprAST &call) {
+ mlir::ValuePtr mlirGen(CallExprAST &call) {
llvm::StringRef callee = call.getCallee();
auto location = loc(call.loc());
// Codegen the operands first.
- SmallVector<mlir::Value *, 4> operands;
+ SmallVector<mlir::ValuePtr, 4> operands;
for (auto &expr : call.getArgs()) {
- auto *arg = mlirGen(*expr);
+ auto arg = mlirGen(*expr);
if (!arg)
return nullptr;
operands.push_back(arg);
@@ -342,7 +343,7 @@ private:
/// Emit a print expression. It emits specific operations for two builtins:
/// transpose(x) and print(x).
mlir::LogicalResult mlirGen(PrintExprAST &call) {
- auto *arg = mlirGen(*call.getArg());
+ auto arg = mlirGen(*call.getArg());
if (!arg)
return mlir::failure();
@@ -351,12 +352,12 @@ private:
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
- mlir::Value *mlirGen(NumberExprAST &num) {
+ mlir::ValuePtr mlirGen(NumberExprAST &num) {
return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
- mlir::Value *mlirGen(ExprAST &expr) {
+ mlir::ValuePtr mlirGen(ExprAST &expr) {
switch (expr.getKind()) {
case toy::ExprAST::Expr_BinOp:
return mlirGen(cast<BinaryExprAST>(expr));
@@ -380,7 +381,7 @@ private:
/// initializer and record the value in the symbol table before returning it.
/// Future expressions will be able to reference this variable through symbol
/// table lookup.
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) {
+ mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) {
auto init = vardecl.getInitVal();
if (!init) {
emitError(loc(vardecl.loc()),
@@ -388,7 +389,7 @@ private:
return nullptr;
}
- mlir::Value *value = mlirGen(*init);
+ mlir::ValuePtr value = mlirGen(*init);
if (!value)
return nullptr;
@@ -408,7 +409,7 @@ private:
/// Codegen a list of expression, return failure if one of them hit an error.
mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
- ScopedHashTableScope<StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<StringRef, mlir::ValuePtr> var_scope(symbolTable);
for (auto &expr : blockAST) {
// Specific handling for variable declarations, return statement, and
// print. These can only appear in block list and not in nested
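One detail worth calling out in the hunks above: `auto *variable` and `auto *arg` lose their `*`. Under the current alias both spellings compile, but once Value becomes value-typed and ValuePtr stops being a raw pointer, only the plain `auto` form keeps compiling. A self-contained illustration; the `Value` and `lookup` names here are stand-ins, not MLIR's:

    struct Value {};
    using ValuePtr = Value *;    // the transitional alias used by this patch
    // using ValuePtr = Value;   // what it becomes once Value is value-typed

    static Value sentinel;
    ValuePtr lookup() { return &sentinel; } // stand-in for symbolTable.lookup()

    void example() {
      auto variable = lookup();      // fine under either definition of ValuePtr
      // auto *variable = lookup();  // only valid while ValuePtr is a pointer
      (void)variable;
    }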
diff --git a/mlir/examples/toy/Ch3/include/toy/Ops.td b/mlir/examples/toy/Ch3/include/toy/Ops.td
index 921e503e416..6c400169da2 100644
--- a/mlir/examples/toy/Ch3/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch3/include/toy/Ops.td
@@ -98,7 +98,7 @@ def AddOp : Toy_Op<"add", [NoSideEffect]> {
// Allow building an AddOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -129,7 +129,7 @@ def GenericCallOp : Toy_Op<"generic_call"> {
// Add custom build methods for the generic call operation.
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "StringRef callee, ArrayRef<Value *> arguments">
+ "StringRef callee, ArrayRef<ValuePtr> arguments">
];
}
@@ -145,7 +145,7 @@ def MulOp : Toy_Op<"mul", [NoSideEffect]> {
// Allow building a MulOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -225,7 +225,7 @@ def TransposeOp : Toy_Op<"transpose", [NoSideEffect]> {
// Allow building a TransposeOp with from the input operand.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input">
];
// Invoke a static verify method to verify this transpose operation.
diff --git a/mlir/examples/toy/Ch3/mlir/Dialect.cpp b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
index 86f648dbe0e..4a3232dabe3 100644
--- a/mlir/examples/toy/Ch3/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
@@ -94,7 +94,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
// AddOp
void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -103,7 +103,8 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
// GenericCallOp
void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
- StringRef callee, ArrayRef<mlir::Value *> arguments) {
+ StringRef callee,
+ ArrayRef<mlir::ValuePtr> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
@@ -114,7 +115,7 @@ void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
// MulOp
void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -161,7 +162,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
// TransposeOp
void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *value) {
+ mlir::ValuePtr value) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}
diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
index da474e809b3..902c634a954 100644
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -99,7 +99,7 @@ private:
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated, the
/// scope is destroyed and the mappings created in this scope are dropped.
- llvm::ScopedHashTable<StringRef, mlir::Value *> symbolTable;
+ llvm::ScopedHashTable<StringRef, mlir::ValuePtr> symbolTable;
/// Helper conversion for a Toy AST location to an MLIR location.
mlir::Location loc(Location loc) {
@@ -109,7 +109,7 @@ private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
- mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) {
+ mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) {
if (symbolTable.count(var))
return mlir::failure();
symbolTable.insert(var, value);
@@ -132,7 +132,8 @@ private:
/// Emit a new function and add it to the MLIR module.
mlir::FuncOp mlirGen(FunctionAST &funcAST) {
// Create a scope in the symbol table to hold variable declarations.
- ScopedHashTableScope<llvm::StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<llvm::StringRef, mlir::ValuePtr> var_scope(
+ symbolTable);
// Create an MLIR function for the given prototype.
mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -183,7 +184,7 @@ private:
}
/// Emit a binary operation
- mlir::Value *mlirGen(BinaryExprAST &binop) {
+ mlir::ValuePtr mlirGen(BinaryExprAST &binop) {
// First emit the operations for each side of the operation before emitting
// the operation itself. For example if the expression is `a + foo(a)`
// 1) First it will visiting the LHS, which will return a reference to the
@@ -195,10 +196,10 @@ private:
// and the result value is returned. If an error occurs we get a nullptr
// and propagate.
//
- mlir::Value *lhs = mlirGen(*binop.getLHS());
+ mlir::ValuePtr lhs = mlirGen(*binop.getLHS());
if (!lhs)
return nullptr;
- mlir::Value *rhs = mlirGen(*binop.getRHS());
+ mlir::ValuePtr rhs = mlirGen(*binop.getRHS());
if (!rhs)
return nullptr;
auto location = loc(binop.loc());
@@ -219,8 +220,8 @@ private:
/// This is a reference to a variable in an expression. The variable is
/// expected to have been declared and so should have a value in the symbol
/// table, otherwise emit an error and return nullptr.
- mlir::Value *mlirGen(VariableExprAST &expr) {
- if (auto *variable = symbolTable.lookup(expr.getName()))
+ mlir::ValuePtr mlirGen(VariableExprAST &expr) {
+ if (auto variable = symbolTable.lookup(expr.getName()))
return variable;
emitError(loc(expr.loc()), "error: unknown variable '")
@@ -233,7 +234,7 @@ private:
auto location = loc(ret.loc());
// 'return' takes an optional expression, handle that case here.
- mlir::Value *expr = nullptr;
+ mlir::ValuePtr expr = nullptr;
if (ret.getExpr().hasValue()) {
if (!(expr = mlirGen(*ret.getExpr().getValue())))
return mlir::failure();
@@ -241,7 +242,7 @@ private:
// Otherwise, this return operation has zero operands.
builder.create<ReturnOp>(location, expr ? makeArrayRef(expr)
- : ArrayRef<mlir::Value *>());
+ : ArrayRef<mlir::ValuePtr>());
return mlir::success();
}
@@ -263,7 +264,7 @@ private:
/// [[1.000000e+00, 2.000000e+00, 3.000000e+00],
/// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64>
///
- mlir::Value *mlirGen(LiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(LiteralExprAST &lit) {
auto type = getType(lit.getDims());
// The attribute is a vector with a floating point value per element
@@ -309,14 +310,14 @@ private:
/// Emit a call expression. It emits specific operations for the `transpose`
/// builtin. Other identifiers are assumed to be user-defined functions.
- mlir::Value *mlirGen(CallExprAST &call) {
+ mlir::ValuePtr mlirGen(CallExprAST &call) {
llvm::StringRef callee = call.getCallee();
auto location = loc(call.loc());
// Codegen the operands first.
- SmallVector<mlir::Value *, 4> operands;
+ SmallVector<mlir::ValuePtr, 4> operands;
for (auto &expr : call.getArgs()) {
- auto *arg = mlirGen(*expr);
+ auto arg = mlirGen(*expr);
if (!arg)
return nullptr;
operands.push_back(arg);
@@ -342,7 +343,7 @@ private:
/// Emit a print expression. It emits specific operations for two builtins:
/// transpose(x) and print(x).
mlir::LogicalResult mlirGen(PrintExprAST &call) {
- auto *arg = mlirGen(*call.getArg());
+ auto arg = mlirGen(*call.getArg());
if (!arg)
return mlir::failure();
@@ -351,12 +352,12 @@ private:
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
- mlir::Value *mlirGen(NumberExprAST &num) {
+ mlir::ValuePtr mlirGen(NumberExprAST &num) {
return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
- mlir::Value *mlirGen(ExprAST &expr) {
+ mlir::ValuePtr mlirGen(ExprAST &expr) {
switch (expr.getKind()) {
case toy::ExprAST::Expr_BinOp:
return mlirGen(cast<BinaryExprAST>(expr));
@@ -380,7 +381,7 @@ private:
/// initializer and record the value in the symbol table before returning it.
/// Future expressions will be able to reference this variable through symbol
/// table lookup.
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) {
+ mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) {
auto init = vardecl.getInitVal();
if (!init) {
emitError(loc(vardecl.loc()),
@@ -388,7 +389,7 @@ private:
return nullptr;
}
- mlir::Value *value = mlirGen(*init);
+ mlir::ValuePtr value = mlirGen(*init);
if (!value)
return nullptr;
@@ -408,7 +409,7 @@ private:
/// Codegen a list of expression, return failure if one of them hit an error.
mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
- ScopedHashTableScope<StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<StringRef, mlir::ValuePtr> var_scope(symbolTable);
for (auto &expr : blockAST) {
// Specific handling for variable declarations, return statement, and
// print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp
index 1b9dcd20291..42a10397513 100644
--- a/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp
@@ -48,7 +48,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
matchAndRewrite(TransposeOp op,
mlir::PatternRewriter &rewriter) const override {
// Look through the input of the current transpose.
- mlir::Value *transposeInput = op.getOperand();
+ mlir::ValuePtr transposeInput = op.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
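For orientation, the two changed lines sit inside the tutorial's transpose(transpose(x)) -> x pattern. Reconstructed roughly from the Ch3 tutorial sources, so treat the match/rewrite boilerplate as approximate, the whole pattern reads:

    /// Fold transpose(transpose(x)) -> x.
    struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
      SimplifyRedundantTranspose(mlir::MLIRContext *context)
          : OpRewritePattern<TransposeOp>(context, /*benefit=*/1) {}

      mlir::PatternMatchResult
      matchAndRewrite(TransposeOp op,
                      mlir::PatternRewriter &rewriter) const override {
        // Look through the input of the current transpose.
        mlir::ValuePtr transposeInput = op.getOperand();
        TransposeOp transposeInputOp =
            llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());

        // If the input is not defined by another transpose, there is nothing to do.
        if (!transposeInputOp)
          return matchFailure();

        // Otherwise the pair is redundant: forward the inner transpose's input.
        rewriter.replaceOp(op, {transposeInputOp.getOperand()});
        return matchSuccess();
      }
    };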
diff --git a/mlir/examples/toy/Ch4/include/toy/Ops.td b/mlir/examples/toy/Ch4/include/toy/Ops.td
index aec1cc3cfc9..ef5b30a862b 100644
--- a/mlir/examples/toy/Ch4/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch4/include/toy/Ops.td
@@ -100,7 +100,7 @@ def AddOp : Toy_Op<"add",
// Allow building an AddOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -151,7 +151,7 @@ def GenericCallOp : Toy_Op<"generic_call",
// Add custom build methods for the generic call operation.
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "StringRef callee, ArrayRef<Value *> arguments">
+ "StringRef callee, ArrayRef<ValuePtr> arguments">
];
}
@@ -168,7 +168,7 @@ def MulOp : Toy_Op<"mul",
// Allow building a MulOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -245,7 +245,7 @@ def TransposeOp : Toy_Op<"transpose",
// Allow building a TransposeOp with from the input operand.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input">
];
// Invoke a static verify method to verify this transpose operation.
diff --git a/mlir/examples/toy/Ch4/mlir/Dialect.cpp b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
index 7003cbdcc81..8be1094cf15 100644
--- a/mlir/examples/toy/Ch4/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
@@ -55,7 +55,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator(toy.return) by replacing it with a new
/// operation as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "toy.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -70,7 +70,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// operation that takes 'input' as the only operand, and produces a single
/// result of 'resultType'. If a conversion can not be generated, nullptr
/// should be returned.
- Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+ Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
Type resultType,
Location conversionLoc) const final {
return builder.create<CastOp>(conversionLoc, resultType, input);
@@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
// AddOp
void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -164,7 +164,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
// GenericCallOp
void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
- StringRef callee, ArrayRef<mlir::Value *> arguments) {
+ StringRef callee,
+ ArrayRef<mlir::ValuePtr> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
@@ -185,7 +186,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
// MulOp
void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -236,7 +237,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
// TransposeOp
void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *value) {
+ mlir::ValuePtr value) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}
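Only the signature of handleTerminator appears in the hunk above; its body (reconstructed from the Ch4 tutorial, so approximate) is what actually consumes the ValuePtr list: every value the inlined call produced is replaced by the corresponding toy.return operand.

    void handleTerminator(Operation *op,
                          ArrayRef<ValuePtr> valuesToRepl) const final {
      // Only "toy.return" needs to be handled here.
      auto returnOp = cast<ReturnOp>(op);

      // Replace the call results with the operands of the inlined return.
      assert(returnOp.getNumOperands() == valuesToRepl.size());
      for (const auto &it : llvm::enumerate(returnOp.getOperands()))
        valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
    }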
diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
index da474e809b3..902c634a954 100644
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -99,7 +99,7 @@ private:
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated, the
/// scope is destroyed and the mappings created in this scope are dropped.
- llvm::ScopedHashTable<StringRef, mlir::Value *> symbolTable;
+ llvm::ScopedHashTable<StringRef, mlir::ValuePtr> symbolTable;
/// Helper conversion for a Toy AST location to an MLIR location.
mlir::Location loc(Location loc) {
@@ -109,7 +109,7 @@ private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
- mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) {
+ mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) {
if (symbolTable.count(var))
return mlir::failure();
symbolTable.insert(var, value);
@@ -132,7 +132,8 @@ private:
/// Emit a new function and add it to the MLIR module.
mlir::FuncOp mlirGen(FunctionAST &funcAST) {
// Create a scope in the symbol table to hold variable declarations.
- ScopedHashTableScope<llvm::StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<llvm::StringRef, mlir::ValuePtr> var_scope(
+ symbolTable);
// Create an MLIR function for the given prototype.
mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -183,7 +184,7 @@ private:
}
/// Emit a binary operation
- mlir::Value *mlirGen(BinaryExprAST &binop) {
+ mlir::ValuePtr mlirGen(BinaryExprAST &binop) {
// First emit the operations for each side of the operation before emitting
// the operation itself. For example if the expression is `a + foo(a)`
// 1) First it will visiting the LHS, which will return a reference to the
@@ -195,10 +196,10 @@ private:
// and the result value is returned. If an error occurs we get a nullptr
// and propagate.
//
- mlir::Value *lhs = mlirGen(*binop.getLHS());
+ mlir::ValuePtr lhs = mlirGen(*binop.getLHS());
if (!lhs)
return nullptr;
- mlir::Value *rhs = mlirGen(*binop.getRHS());
+ mlir::ValuePtr rhs = mlirGen(*binop.getRHS());
if (!rhs)
return nullptr;
auto location = loc(binop.loc());
@@ -219,8 +220,8 @@ private:
/// This is a reference to a variable in an expression. The variable is
/// expected to have been declared and so should have a value in the symbol
/// table, otherwise emit an error and return nullptr.
- mlir::Value *mlirGen(VariableExprAST &expr) {
- if (auto *variable = symbolTable.lookup(expr.getName()))
+ mlir::ValuePtr mlirGen(VariableExprAST &expr) {
+ if (auto variable = symbolTable.lookup(expr.getName()))
return variable;
emitError(loc(expr.loc()), "error: unknown variable '")
@@ -233,7 +234,7 @@ private:
auto location = loc(ret.loc());
// 'return' takes an optional expression, handle that case here.
- mlir::Value *expr = nullptr;
+ mlir::ValuePtr expr = nullptr;
if (ret.getExpr().hasValue()) {
if (!(expr = mlirGen(*ret.getExpr().getValue())))
return mlir::failure();
@@ -241,7 +242,7 @@ private:
// Otherwise, this return operation has zero operands.
builder.create<ReturnOp>(location, expr ? makeArrayRef(expr)
- : ArrayRef<mlir::Value *>());
+ : ArrayRef<mlir::ValuePtr>());
return mlir::success();
}
@@ -263,7 +264,7 @@ private:
/// [[1.000000e+00, 2.000000e+00, 3.000000e+00],
/// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64>
///
- mlir::Value *mlirGen(LiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(LiteralExprAST &lit) {
auto type = getType(lit.getDims());
// The attribute is a vector with a floating point value per element
@@ -309,14 +310,14 @@ private:
/// Emit a call expression. It emits specific operations for the `transpose`
/// builtin. Other identifiers are assumed to be user-defined functions.
- mlir::Value *mlirGen(CallExprAST &call) {
+ mlir::ValuePtr mlirGen(CallExprAST &call) {
llvm::StringRef callee = call.getCallee();
auto location = loc(call.loc());
// Codegen the operands first.
- SmallVector<mlir::Value *, 4> operands;
+ SmallVector<mlir::ValuePtr, 4> operands;
for (auto &expr : call.getArgs()) {
- auto *arg = mlirGen(*expr);
+ auto arg = mlirGen(*expr);
if (!arg)
return nullptr;
operands.push_back(arg);
@@ -342,7 +343,7 @@ private:
/// Emit a print expression. It emits specific operations for two builtins:
/// transpose(x) and print(x).
mlir::LogicalResult mlirGen(PrintExprAST &call) {
- auto *arg = mlirGen(*call.getArg());
+ auto arg = mlirGen(*call.getArg());
if (!arg)
return mlir::failure();
@@ -351,12 +352,12 @@ private:
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
- mlir::Value *mlirGen(NumberExprAST &num) {
+ mlir::ValuePtr mlirGen(NumberExprAST &num) {
return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
- mlir::Value *mlirGen(ExprAST &expr) {
+ mlir::ValuePtr mlirGen(ExprAST &expr) {
switch (expr.getKind()) {
case toy::ExprAST::Expr_BinOp:
return mlirGen(cast<BinaryExprAST>(expr));
@@ -380,7 +381,7 @@ private:
/// initializer and record the value in the symbol table before returning it.
/// Future expressions will be able to reference this variable through symbol
/// table lookup.
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) {
+ mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) {
auto init = vardecl.getInitVal();
if (!init) {
emitError(loc(vardecl.loc()),
@@ -388,7 +389,7 @@ private:
return nullptr;
}
- mlir::Value *value = mlirGen(*init);
+ mlir::ValuePtr value = mlirGen(*init);
if (!value)
return nullptr;
@@ -408,7 +409,7 @@ private:
/// Codegen a list of expression, return failure if one of them hit an error.
mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
- ScopedHashTableScope<StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<StringRef, mlir::ValuePtr> var_scope(symbolTable);
for (auto &expr : blockAST) {
// Specific handling for variable declarations, return statement, and
// print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
index 47e1abc6c74..604e9fa6c83 100644
--- a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
@@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
matchAndRewrite(TransposeOp op,
mlir::PatternRewriter &rewriter) const override {
// Look through the input of the current transpose.
- mlir::Value *transposeInput = op.getOperand();
+ mlir::ValuePtr transposeInput = op.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
diff --git a/mlir/examples/toy/Ch5/include/toy/Ops.td b/mlir/examples/toy/Ch5/include/toy/Ops.td
index e40b661fd34..b3bda1d647b 100644
--- a/mlir/examples/toy/Ch5/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch5/include/toy/Ops.td
@@ -100,7 +100,7 @@ def AddOp : Toy_Op<"add",
// Allow building an AddOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -151,7 +151,7 @@ def GenericCallOp : Toy_Op<"generic_call",
// Add custom build methods for the generic call operation.
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "StringRef callee, ArrayRef<Value *> arguments">
+ "StringRef callee, ArrayRef<ValuePtr> arguments">
];
}
@@ -168,7 +168,7 @@ def MulOp : Toy_Op<"mul",
// Allow building a MulOp with from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -246,7 +246,7 @@ def TransposeOp : Toy_Op<"transpose",
// Allow building a TransposeOp with from the input operand.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input">
];
// Invoke a static verify method to verify this transpose operation.
diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
index 7003cbdcc81..8be1094cf15 100644
--- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
@@ -55,7 +55,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator(toy.return) by replacing it with a new
/// operation as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "toy.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -70,7 +70,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// operation that takes 'input' as the only operand, and produces a single
/// result of 'resultType'. If a conversion can not be generated, nullptr
/// should be returned.
- Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+ Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
Type resultType,
Location conversionLoc) const final {
return builder.create<CastOp>(conversionLoc, resultType, input);
@@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
// AddOp
void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -164,7 +164,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
// GenericCallOp
void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
- StringRef callee, ArrayRef<mlir::Value *> arguments) {
+ StringRef callee,
+ ArrayRef<mlir::ValuePtr> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
@@ -185,7 +186,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
// MulOp
void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -236,7 +237,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
// TransposeOp
void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *value) {
+ mlir::ValuePtr value) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}
diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
index 4ab8c5b501c..3fa761c7404 100644
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -43,8 +43,8 @@ static MemRefType convertTensorToMemRef(TensorType type) {
}
/// Insert an allocation and deallocation for the given MemRefType.
-static Value *insertAllocAndDealloc(MemRefType type, Location loc,
- PatternRewriter &rewriter) {
+static ValuePtr insertAllocAndDealloc(MemRefType type, Location loc,
+ PatternRewriter &rewriter) {
auto alloc = rewriter.create<AllocOp>(loc, type);
// Make sure to allocate at the beginning of the block.
@@ -63,11 +63,11 @@ static Value *insertAllocAndDealloc(MemRefType type, Location loc,
/// to the operands of the input operation, and the set of loop induction
/// variables for the iteration. It returns a value to store at the current
/// index of the iteration.
-using LoopIterationFn = function_ref<Value *(PatternRewriter &rewriter,
- ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs)>;
+using LoopIterationFn = function_ref<ValuePtr(PatternRewriter &rewriter,
+ ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs)>;
-static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
+static void lowerOpToLoops(Operation *op, ArrayRef<ValuePtr> operands,
PatternRewriter &rewriter,
LoopIterationFn processIteration) {
auto tensorType = (*op->result_type_begin()).cast<TensorType>();
@@ -78,7 +78,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter);
// Create an empty affine loop for each of the dimensions within the shape.
- SmallVector<Value *, 4> loopIvs;
+ SmallVector<ValuePtr, 4> loopIvs;
for (auto dim : tensorType.getShape()) {
auto loop = rewriter.create<AffineForOp>(loc, /*lb=*/0, dim, /*step=*/1);
loop.getBody()->clear();
@@ -94,7 +94,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
// Generate a call to the processing function with the rewriter, the memref
// operands, and the loop induction variables. This function will return the
// value to store at the current index.
- Value *valueToStore = processIteration(rewriter, operands, loopIvs);
+ ValuePtr valueToStore = processIteration(rewriter, operands, loopIvs);
rewriter.create<AffineStoreOp>(loc, valueToStore, alloc,
llvm::makeArrayRef(loopIvs));
@@ -113,13 +113,13 @@ struct BinaryOpLowering : public ConversionPattern {
: ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
lowerOpToLoops(
op, operands, rewriter,
- [loc](PatternRewriter &rewriter, ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs) {
+ [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the BinaryOp. This
// allows for using the nice named accessors that are generated by the
// ODS.
@@ -163,7 +163,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// Create these constants up-front to avoid large amounts of redundant
// operations.
auto valueShape = memRefType.getShape();
- SmallVector<Value *, 8> constantIndices;
+ SmallVector<ValuePtr, 8> constantIndices;
for (auto i : llvm::seq<int64_t>(
0, *std::max_element(valueShape.begin(), valueShape.end())))
constantIndices.push_back(rewriter.create<ConstantIndexOp>(loc, i));
@@ -172,7 +172,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// will need to generate a store for each of the elements. The following
// functor recursively walks the dimensions of the constant shape,
// generating a store when the recursion hits the base case.
- SmallVector<Value *, 2> indices;
+ SmallVector<ValuePtr, 2> indices;
auto valueIt = constantValue.getValues<FloatAttr>().begin();
std::function<void(uint64_t)> storeElements = [&](uint64_t dimension) {
// The last dimension is the base case of the recursion, at this point
@@ -231,22 +231,22 @@ struct TransposeOpLowering : public ConversionPattern {
: ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
lowerOpToLoops(
op, operands, rewriter,
- [loc](PatternRewriter &rewriter, ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs) {
+ [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the TransposeOp.
// This allows for using the nice named accessors that are generated
// by the ODS.
toy::TransposeOpOperandAdaptor transposeAdaptor(memRefOperands);
- Value *input = transposeAdaptor.input();
+ ValuePtr input = transposeAdaptor.input();
// Transpose the elements by generating a load from the reverse
// indices.
- SmallVector<Value *, 2> reverseIvs(llvm::reverse(loopIvs));
+ SmallVector<ValuePtr, 2> reverseIvs(llvm::reverse(loopIvs));
return rewriter.create<AffineLoadOp>(loc, input, reverseIvs);
});
return matchSuccess();
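The LoopIterationFn retyped above is the callback each op-specific lowering hands to lowerOpToLoops; for the binary ops it loads both memref operands at the current induction variables and returns the scalar value to store back. A sketch of that callback for an add lowering, with the tutorial's generated OperandAdaptor and the standard dialect's AddFOp standing in; treat the exact spellings as assumptions:

    lowerOpToLoops(
        op, operands, rewriter,
        [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
              ArrayRef<ValuePtr> loopIvs) {
          // Load the scalar lhs/rhs elements at the current loop indices.
          toy::AddOpOperandAdaptor addAdaptor(memRefOperands);
          auto loadedLhs =
              rewriter.create<AffineLoadOp>(loc, addAdaptor.lhs(), loopIvs);
          auto loadedRhs =
              rewriter.create<AffineLoadOp>(loc, addAdaptor.rhs(), loopIvs);
          // Return the value to store at this index: a standard-dialect addf.
          return rewriter.create<AddFOp>(loc, loadedLhs, loadedRhs);
        });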
diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
index da474e809b3..902c634a954 100644
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -99,7 +99,7 @@ private:
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated, the
/// scope is destroyed and the mappings created in this scope are dropped.
- llvm::ScopedHashTable<StringRef, mlir::Value *> symbolTable;
+ llvm::ScopedHashTable<StringRef, mlir::ValuePtr> symbolTable;
/// Helper conversion for a Toy AST location to an MLIR location.
mlir::Location loc(Location loc) {
@@ -109,7 +109,7 @@ private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
- mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) {
+ mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) {
if (symbolTable.count(var))
return mlir::failure();
symbolTable.insert(var, value);
@@ -132,7 +132,8 @@ private:
/// Emit a new function and add it to the MLIR module.
mlir::FuncOp mlirGen(FunctionAST &funcAST) {
// Create a scope in the symbol table to hold variable declarations.
- ScopedHashTableScope<llvm::StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<llvm::StringRef, mlir::ValuePtr> var_scope(
+ symbolTable);
// Create an MLIR function for the given prototype.
mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -183,7 +184,7 @@ private:
}
/// Emit a binary operation
- mlir::Value *mlirGen(BinaryExprAST &binop) {
+ mlir::ValuePtr mlirGen(BinaryExprAST &binop) {
// First emit the operations for each side of the operation before emitting
// the operation itself. For example if the expression is `a + foo(a)`
// 1) First it will visiting the LHS, which will return a reference to the
@@ -195,10 +196,10 @@ private:
// and the result value is returned. If an error occurs we get a nullptr
// and propagate.
//
- mlir::Value *lhs = mlirGen(*binop.getLHS());
+ mlir::ValuePtr lhs = mlirGen(*binop.getLHS());
if (!lhs)
return nullptr;
- mlir::Value *rhs = mlirGen(*binop.getRHS());
+ mlir::ValuePtr rhs = mlirGen(*binop.getRHS());
if (!rhs)
return nullptr;
auto location = loc(binop.loc());
@@ -219,8 +220,8 @@ private:
/// This is a reference to a variable in an expression. The variable is
/// expected to have been declared and so should have a value in the symbol
/// table, otherwise emit an error and return nullptr.
- mlir::Value *mlirGen(VariableExprAST &expr) {
- if (auto *variable = symbolTable.lookup(expr.getName()))
+ mlir::ValuePtr mlirGen(VariableExprAST &expr) {
+ if (auto variable = symbolTable.lookup(expr.getName()))
return variable;
emitError(loc(expr.loc()), "error: unknown variable '")
@@ -233,7 +234,7 @@ private:
auto location = loc(ret.loc());
// 'return' takes an optional expression, handle that case here.
- mlir::Value *expr = nullptr;
+ mlir::ValuePtr expr = nullptr;
if (ret.getExpr().hasValue()) {
if (!(expr = mlirGen(*ret.getExpr().getValue())))
return mlir::failure();
@@ -241,7 +242,7 @@ private:
// Otherwise, this return operation has zero operands.
builder.create<ReturnOp>(location, expr ? makeArrayRef(expr)
- : ArrayRef<mlir::Value *>());
+ : ArrayRef<mlir::ValuePtr>());
return mlir::success();
}
@@ -263,7 +264,7 @@ private:
/// [[1.000000e+00, 2.000000e+00, 3.000000e+00],
/// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64>
///
- mlir::Value *mlirGen(LiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(LiteralExprAST &lit) {
auto type = getType(lit.getDims());
// The attribute is a vector with a floating point value per element
@@ -309,14 +310,14 @@ private:
/// Emit a call expression. It emits specific operations for the `transpose`
/// builtin. Other identifiers are assumed to be user-defined functions.
- mlir::Value *mlirGen(CallExprAST &call) {
+ mlir::ValuePtr mlirGen(CallExprAST &call) {
llvm::StringRef callee = call.getCallee();
auto location = loc(call.loc());
// Codegen the operands first.
- SmallVector<mlir::Value *, 4> operands;
+ SmallVector<mlir::ValuePtr, 4> operands;
for (auto &expr : call.getArgs()) {
- auto *arg = mlirGen(*expr);
+ auto arg = mlirGen(*expr);
if (!arg)
return nullptr;
operands.push_back(arg);
@@ -342,7 +343,7 @@ private:
/// Emit a print expression. It emits specific operations for two builtins:
/// transpose(x) and print(x).
mlir::LogicalResult mlirGen(PrintExprAST &call) {
- auto *arg = mlirGen(*call.getArg());
+ auto arg = mlirGen(*call.getArg());
if (!arg)
return mlir::failure();
@@ -351,12 +352,12 @@ private:
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
- mlir::Value *mlirGen(NumberExprAST &num) {
+ mlir::ValuePtr mlirGen(NumberExprAST &num) {
return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
- mlir::Value *mlirGen(ExprAST &expr) {
+ mlir::ValuePtr mlirGen(ExprAST &expr) {
switch (expr.getKind()) {
case toy::ExprAST::Expr_BinOp:
return mlirGen(cast<BinaryExprAST>(expr));
@@ -380,7 +381,7 @@ private:
/// initializer and record the value in the symbol table before returning it.
/// Future expressions will be able to reference this variable through symbol
/// table lookup.
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) {
+ mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) {
auto init = vardecl.getInitVal();
if (!init) {
emitError(loc(vardecl.loc()),
@@ -388,7 +389,7 @@ private:
return nullptr;
}
- mlir::Value *value = mlirGen(*init);
+ mlir::ValuePtr value = mlirGen(*init);
if (!value)
return nullptr;
@@ -408,7 +409,7 @@ private:
/// Codegen a list of expression, return failure if one of them hit an error.
mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
- ScopedHashTableScope<StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<StringRef, mlir::ValuePtr> var_scope(symbolTable);
for (auto &expr : blockAST) {
// Specific handling for variable declarations, return statement, and
// print. These can only appear in block list and not in nested
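The MLIRGen changes above are driven almost entirely by the symbol table, which is templated on the mapped value type. For reference (not part of the patch), a minimal, self-contained sketch of how `llvm::ScopedHashTable` behaves with the `ValuePtr` spelling; the stand-in declarations and names below are illustrative only:

```c++
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/StringRef.h"

// Stand-ins so the sketch compiles on its own; in the real code these are
// mlir::Value and the mlir::ValuePtr alias introduced by this patch.
class Value;
using ValuePtr = Value *;

static void symbolTableSketch(ValuePtr argValue) {
  llvm::ScopedHashTable<llvm::StringRef, ValuePtr> symbolTable;
  {
    // Entering a function in MLIRGen opens a scope like this one.
    llvm::ScopedHashTableScope<llvm::StringRef, ValuePtr> scope(symbolTable);
    symbolTable.insert("a", argValue); // declare("a", value) boils down to this
    (void)symbolTable.lookup("a");     // returns argValue
  }                                    // scope destroyed: the mapping is dropped
  (void)symbolTable.lookup("a");       // now returns nullptr
}
```

Because `ValuePtr` is still a plain pointer at this point, a failed `lookup` returns `nullptr`, which is exactly the check the `mlirGen(VariableExprAST &)` overloads above rely on.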
diff --git a/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
index 47e1abc6c74..604e9fa6c83 100644
--- a/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
@@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
matchAndRewrite(TransposeOp op,
mlir::PatternRewriter &rewriter) const override {
// Look through the input of the current transpose.
- mlir::Value *transposeInput = op.getOperand();
+ mlir::ValuePtr transposeInput = op.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
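For context on the single line changed above, the surrounding pattern from the tutorial reads roughly as follows with the new spelling (a condensed sketch, comments abbreviated; not part of the patch):

```c++
/// Fold transpose(transpose(x)) into x.
struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
  SimplifyRedundantTranspose(mlir::MLIRContext *context)
      : OpRewritePattern<TransposeOp>(context, /*benefit=*/1) {}

  mlir::PatternMatchResult
  matchAndRewrite(TransposeOp op,
                  mlir::PatternRewriter &rewriter) const override {
    // Look through the input of the current transpose.
    mlir::ValuePtr transposeInput = op.getOperand();
    TransposeOp transposeInputOp =
        llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());

    // If the input is not produced by another transpose, there is nothing to do.
    if (!transposeInputOp)
      return matchFailure();

    // Otherwise replace this op with the operand of the inner transpose.
    rewriter.replaceOp(op, {transposeInputOp.getOperand()});
    return matchSuccess();
  }
};
```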
diff --git a/mlir/examples/toy/Ch6/include/toy/Ops.td b/mlir/examples/toy/Ch6/include/toy/Ops.td
index e40b661fd34..b3bda1d647b 100644
--- a/mlir/examples/toy/Ch6/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch6/include/toy/Ops.td
@@ -100,7 +100,7 @@ def AddOp : Toy_Op<"add",
// Allow building an AddOp from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -151,7 +151,7 @@ def GenericCallOp : Toy_Op<"generic_call",
// Add custom build methods for the generic call operation.
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "StringRef callee, ArrayRef<Value *> arguments">
+ "StringRef callee, ArrayRef<ValuePtr> arguments">
];
}
@@ -168,7 +168,7 @@ def MulOp : Toy_Op<"mul",
// Allow building a MulOp from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -246,7 +246,7 @@ def TransposeOp : Toy_Op<"transpose",
// Allow building a TransposeOp from the input operand.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input">
];
// Invoke a static verify method to verify this transpose operation.
diff --git a/mlir/examples/toy/Ch6/mlir/Dialect.cpp b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
index 7003cbdcc81..8be1094cf15 100644
--- a/mlir/examples/toy/Ch6/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
@@ -55,7 +55,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator (toy.return) by replacing it with a new
/// operation as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "toy.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -70,7 +70,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// operation that takes 'input' as the only operand, and produces a single
/// result of 'resultType'. If a conversion cannot be generated, nullptr
/// should be returned.
- Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+ Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
Type resultType,
Location conversionLoc) const final {
return builder.create<CastOp>(conversionLoc, resultType, input);
@@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
// AddOp
void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -164,7 +164,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
// GenericCallOp
void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
- StringRef callee, ArrayRef<mlir::Value *> arguments) {
+ StringRef callee,
+ ArrayRef<mlir::ValuePtr> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
@@ -185,7 +186,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
// MulOp
void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -236,7 +237,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
// TransposeOp
void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *value) {
+ mlir::ValuePtr value) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}
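A usage note on the custom builders above (a sketch, not part of the patch, assuming the Toy op classes are in scope and `builder` has a valid insertion point): callers go through `OpBuilder::create<...>`, which forwards its trailing arguments to these `build` methods, so the only visible difference after the rename is the `ValuePtr` parameter spelling.

```c++
// Sketch: creating Toy ops through the custom builders shown above.
mlir::ValuePtr makeTransposedSum(mlir::OpBuilder &builder, mlir::Location loc,
                                 mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
  // Forwards to AddOp::build(Builder *, OperationState &, ValuePtr, ValuePtr).
  AddOp sum = builder.create<AddOp>(loc, lhs, rhs);
  // Forwards to TransposeOp::build(Builder *, OperationState &, ValuePtr).
  TransposeOp transposed = builder.create<TransposeOp>(loc, sum.getResult());
  return transposed.getResult();
}
```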
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
index 4ab8c5b501c..3fa761c7404 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -43,8 +43,8 @@ static MemRefType convertTensorToMemRef(TensorType type) {
}
/// Insert an allocation and deallocation for the given MemRefType.
-static Value *insertAllocAndDealloc(MemRefType type, Location loc,
- PatternRewriter &rewriter) {
+static ValuePtr insertAllocAndDealloc(MemRefType type, Location loc,
+ PatternRewriter &rewriter) {
auto alloc = rewriter.create<AllocOp>(loc, type);
// Make sure to allocate at the beginning of the block.
@@ -63,11 +63,11 @@ static Value *insertAllocAndDealloc(MemRefType type, Location loc,
/// to the operands of the input operation, and the set of loop induction
/// variables for the iteration. It returns a value to store at the current
/// index of the iteration.
-using LoopIterationFn = function_ref<Value *(PatternRewriter &rewriter,
- ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs)>;
+using LoopIterationFn = function_ref<ValuePtr(PatternRewriter &rewriter,
+ ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs)>;
-static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
+static void lowerOpToLoops(Operation *op, ArrayRef<ValuePtr> operands,
PatternRewriter &rewriter,
LoopIterationFn processIteration) {
auto tensorType = (*op->result_type_begin()).cast<TensorType>();
@@ -78,7 +78,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter);
// Create an empty affine loop for each of the dimensions within the shape.
- SmallVector<Value *, 4> loopIvs;
+ SmallVector<ValuePtr, 4> loopIvs;
for (auto dim : tensorType.getShape()) {
auto loop = rewriter.create<AffineForOp>(loc, /*lb=*/0, dim, /*step=*/1);
loop.getBody()->clear();
@@ -94,7 +94,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
// Generate a call to the processing function with the rewriter, the memref
// operands, and the loop induction variables. This function will return the
// value to store at the current index.
- Value *valueToStore = processIteration(rewriter, operands, loopIvs);
+ ValuePtr valueToStore = processIteration(rewriter, operands, loopIvs);
rewriter.create<AffineStoreOp>(loc, valueToStore, alloc,
llvm::makeArrayRef(loopIvs));
@@ -113,13 +113,13 @@ struct BinaryOpLowering : public ConversionPattern {
: ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
lowerOpToLoops(
op, operands, rewriter,
- [loc](PatternRewriter &rewriter, ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs) {
+ [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the BinaryOp. This
// allows for using the nice named accessors that are generated by the
// ODS.
@@ -163,7 +163,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// Create these constants up-front to avoid large amounts of redundant
// operations.
auto valueShape = memRefType.getShape();
- SmallVector<Value *, 8> constantIndices;
+ SmallVector<ValuePtr, 8> constantIndices;
for (auto i : llvm::seq<int64_t>(
0, *std::max_element(valueShape.begin(), valueShape.end())))
constantIndices.push_back(rewriter.create<ConstantIndexOp>(loc, i));
@@ -172,7 +172,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// will need to generate a store for each of the elements. The following
// functor recursively walks the dimensions of the constant shape,
// generating a store when the recursion hits the base case.
- SmallVector<Value *, 2> indices;
+ SmallVector<ValuePtr, 2> indices;
auto valueIt = constantValue.getValues<FloatAttr>().begin();
std::function<void(uint64_t)> storeElements = [&](uint64_t dimension) {
// The last dimension is the base case of the recursion, at this point
@@ -231,22 +231,22 @@ struct TransposeOpLowering : public ConversionPattern {
: ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
lowerOpToLoops(
op, operands, rewriter,
- [loc](PatternRewriter &rewriter, ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs) {
+ [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the TransposeOp.
// This allows for using the nice named accessors that are generated
// by the ODS.
toy::TransposeOpOperandAdaptor transposeAdaptor(memRefOperands);
- Value *input = transposeAdaptor.input();
+ ValuePtr input = transposeAdaptor.input();
// Transpose the elements by generating a load from the reverse
// indices.
- SmallVector<Value *, 2> reverseIvs(llvm::reverse(loopIvs));
+ SmallVector<ValuePtr, 2> reverseIvs(llvm::reverse(loopIvs));
return rewriter.create<AffineLoadOp>(loc, input, reverseIvs);
});
return matchSuccess();
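The hunks above stop right after the operand-adaptor comment in `BinaryOpLowering`; for orientation, the elided callback passed to `lowerOpToLoops` looks roughly like the following sketch (abbreviated; `BinaryOp` and `LoweredBinaryOp` are the template parameters of `BinaryOpLowering`, instantiated with e.g. toy::AddOp/AddFOp):

```c++
// Sketch of the callback elided from the hunk above.
lowerOpToLoops(
    op, operands, rewriter,
    [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
          ArrayRef<ValuePtr> loopIvs) {
      // Named accessors for the remapped memref operands, generated by ODS.
      typename BinaryOp::OperandAdaptor binaryAdaptor(memRefOperands);

      // Load the element of each side at the current induction variables.
      auto loadedLhs =
          rewriter.create<AffineLoadOp>(loc, binaryAdaptor.lhs(), loopIvs);
      auto loadedRhs =
          rewriter.create<AffineLoadOp>(loc, binaryAdaptor.rhs(), loopIvs);

      // The scalar op whose result lowerOpToLoops stores at this index.
      return rewriter.create<LoweredBinaryOp>(loc, loadedLhs, loadedRhs);
    });
```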
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
index d35cc5c576a..c3180b4a92d 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
@@ -51,7 +51,7 @@ public:
: ConversionPattern(toy::PrintOp::getOperationName(), 1, context) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto memRefType = (*op->operand_type_begin()).cast<MemRefType>();
auto memRefShape = memRefType.getShape();
@@ -64,14 +64,14 @@ public:
// Get a symbol reference to the printf function, inserting it if necessary.
auto printfRef = getOrInsertPrintf(rewriter, parentModule, llvmDialect);
- Value *formatSpecifierCst = getOrCreateGlobalString(
+ ValuePtr formatSpecifierCst = getOrCreateGlobalString(
loc, rewriter, "frmt_spec", StringRef("%f \0", 4), parentModule,
llvmDialect);
- Value *newLineCst = getOrCreateGlobalString(
+ ValuePtr newLineCst = getOrCreateGlobalString(
loc, rewriter, "nl", StringRef("\n\0", 2), parentModule, llvmDialect);
// Create a loop for each of the dimensions within the shape.
- SmallVector<Value *, 4> loopIvs;
+ SmallVector<ValuePtr, 4> loopIvs;
for (unsigned i = 0, e = memRefShape.size(); i != e; ++i) {
auto lowerBound = rewriter.create<ConstantIndexOp>(loc, 0);
auto upperBound = rewriter.create<ConstantIndexOp>(loc, memRefShape[i]);
@@ -97,7 +97,7 @@ public:
auto elementLoad = rewriter.create<LoadOp>(loc, printOp.input(), loopIvs);
rewriter.create<CallOp>(
loc, printfRef, rewriter.getIntegerType(32),
- ArrayRef<Value *>({formatSpecifierCst, elementLoad}));
+ ArrayRef<ValuePtr>({formatSpecifierCst, elementLoad}));
// Notify the rewriter that this operation has been removed.
rewriter.eraseOp(op);
@@ -130,10 +130,10 @@ private:
/// Return a value representing an access into a global string with the given
/// name, creating the string if necessary.
- static Value *getOrCreateGlobalString(Location loc, OpBuilder &builder,
- StringRef name, StringRef value,
- ModuleOp module,
- LLVM::LLVMDialect *llvmDialect) {
+ static ValuePtr getOrCreateGlobalString(Location loc, OpBuilder &builder,
+ StringRef name, StringRef value,
+ ModuleOp module,
+ LLVM::LLVMDialect *llvmDialect) {
// Create the global at the entry of the module.
LLVM::GlobalOp global;
if (!(global = module.lookupSymbol<LLVM::GlobalOp>(name))) {
@@ -147,13 +147,13 @@ private:
}
// Get the pointer to the first character in the global string.
- Value *globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- Value *cst0 = builder.create<LLVM::ConstantOp>(
+ ValuePtr globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
+ ValuePtr cst0 = builder.create<LLVM::ConstantOp>(
loc, LLVM::LLVMType::getInt64Ty(llvmDialect),
builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr,
- ArrayRef<Value *>({cst0, cst0}));
+ ArrayRef<ValuePtr>({cst0, cst0}));
}
};
} // end anonymous namespace
diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
index da474e809b3..902c634a954 100644
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -99,7 +99,7 @@ private:
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated, the
/// scope is destroyed and the mappings created in this scope are dropped.
- llvm::ScopedHashTable<StringRef, mlir::Value *> symbolTable;
+ llvm::ScopedHashTable<StringRef, mlir::ValuePtr> symbolTable;
/// Helper conversion for a Toy AST location to an MLIR location.
mlir::Location loc(Location loc) {
@@ -109,7 +109,7 @@ private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
- mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) {
+ mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) {
if (symbolTable.count(var))
return mlir::failure();
symbolTable.insert(var, value);
@@ -132,7 +132,8 @@ private:
/// Emit a new function and add it to the MLIR module.
mlir::FuncOp mlirGen(FunctionAST &funcAST) {
// Create a scope in the symbol table to hold variable declarations.
- ScopedHashTableScope<llvm::StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<llvm::StringRef, mlir::ValuePtr> var_scope(
+ symbolTable);
// Create an MLIR function for the given prototype.
mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -183,7 +184,7 @@ private:
}
/// Emit a binary operation
- mlir::Value *mlirGen(BinaryExprAST &binop) {
+ mlir::ValuePtr mlirGen(BinaryExprAST &binop) {
// First emit the operations for each side of the operation before emitting
// the operation itself. For example if the expression is `a + foo(a)`
// 1) First it will visit the LHS, which will return a reference to the
@@ -195,10 +196,10 @@ private:
// and the result value is returned. If an error occurs we get a nullptr
// and propagate.
//
- mlir::Value *lhs = mlirGen(*binop.getLHS());
+ mlir::ValuePtr lhs = mlirGen(*binop.getLHS());
if (!lhs)
return nullptr;
- mlir::Value *rhs = mlirGen(*binop.getRHS());
+ mlir::ValuePtr rhs = mlirGen(*binop.getRHS());
if (!rhs)
return nullptr;
auto location = loc(binop.loc());
@@ -219,8 +220,8 @@ private:
/// This is a reference to a variable in an expression. The variable is
/// expected to have been declared and so should have a value in the symbol
/// table, otherwise emit an error and return nullptr.
- mlir::Value *mlirGen(VariableExprAST &expr) {
- if (auto *variable = symbolTable.lookup(expr.getName()))
+ mlir::ValuePtr mlirGen(VariableExprAST &expr) {
+ if (auto variable = symbolTable.lookup(expr.getName()))
return variable;
emitError(loc(expr.loc()), "error: unknown variable '")
@@ -233,7 +234,7 @@ private:
auto location = loc(ret.loc());
// 'return' takes an optional expression, handle that case here.
- mlir::Value *expr = nullptr;
+ mlir::ValuePtr expr = nullptr;
if (ret.getExpr().hasValue()) {
if (!(expr = mlirGen(*ret.getExpr().getValue())))
return mlir::failure();
@@ -241,7 +242,7 @@ private:
// Otherwise, this return operation has zero operands.
builder.create<ReturnOp>(location, expr ? makeArrayRef(expr)
- : ArrayRef<mlir::Value *>());
+ : ArrayRef<mlir::ValuePtr>());
return mlir::success();
}
@@ -263,7 +264,7 @@ private:
/// [[1.000000e+00, 2.000000e+00, 3.000000e+00],
/// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64>
///
- mlir::Value *mlirGen(LiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(LiteralExprAST &lit) {
auto type = getType(lit.getDims());
// The attribute is a vector with a floating point value per element
@@ -309,14 +310,14 @@ private:
/// Emit a call expression. It emits specific operations for the `transpose`
/// builtin. Other identifiers are assumed to be user-defined functions.
- mlir::Value *mlirGen(CallExprAST &call) {
+ mlir::ValuePtr mlirGen(CallExprAST &call) {
llvm::StringRef callee = call.getCallee();
auto location = loc(call.loc());
// Codegen the operands first.
- SmallVector<mlir::Value *, 4> operands;
+ SmallVector<mlir::ValuePtr, 4> operands;
for (auto &expr : call.getArgs()) {
- auto *arg = mlirGen(*expr);
+ auto arg = mlirGen(*expr);
if (!arg)
return nullptr;
operands.push_back(arg);
@@ -342,7 +343,7 @@ private:
/// Emit a print expression. It emits specific operations for two builtins:
/// transpose(x) and print(x).
mlir::LogicalResult mlirGen(PrintExprAST &call) {
- auto *arg = mlirGen(*call.getArg());
+ auto arg = mlirGen(*call.getArg());
if (!arg)
return mlir::failure();
@@ -351,12 +352,12 @@ private:
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
- mlir::Value *mlirGen(NumberExprAST &num) {
+ mlir::ValuePtr mlirGen(NumberExprAST &num) {
return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
- mlir::Value *mlirGen(ExprAST &expr) {
+ mlir::ValuePtr mlirGen(ExprAST &expr) {
switch (expr.getKind()) {
case toy::ExprAST::Expr_BinOp:
return mlirGen(cast<BinaryExprAST>(expr));
@@ -380,7 +381,7 @@ private:
/// initializer and record the value in the symbol table before returning it.
/// Future expressions will be able to reference this variable through symbol
/// table lookup.
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) {
+ mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) {
auto init = vardecl.getInitVal();
if (!init) {
emitError(loc(vardecl.loc()),
@@ -388,7 +389,7 @@ private:
return nullptr;
}
- mlir::Value *value = mlirGen(*init);
+ mlir::ValuePtr value = mlirGen(*init);
if (!value)
return nullptr;
@@ -408,7 +409,7 @@ private:
/// Codegen a list of expressions; return failure if one of them hits an error.
mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
- ScopedHashTableScope<StringRef, mlir::Value *> var_scope(symbolTable);
+ ScopedHashTableScope<StringRef, mlir::ValuePtr> var_scope(symbolTable);
for (auto &expr : blockAST) {
// Specific handling for variable declarations, return statement, and
// print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp
index 47e1abc6c74..604e9fa6c83 100644
--- a/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp
@@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
matchAndRewrite(TransposeOp op,
mlir::PatternRewriter &rewriter) const override {
// Look through the input of the current transpose.
- mlir::Value *transposeInput = op.getOperand();
+ mlir::ValuePtr transposeInput = op.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
diff --git a/mlir/examples/toy/Ch7/include/toy/Ops.td b/mlir/examples/toy/Ch7/include/toy/Ops.td
index 0d48f74e9fe..94f1bcf3e82 100644
--- a/mlir/examples/toy/Ch7/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch7/include/toy/Ops.td
@@ -112,7 +112,7 @@ def AddOp : Toy_Op<"add",
// Allow building an AddOp from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -164,7 +164,7 @@ def GenericCallOp : Toy_Op<"generic_call",
// Add custom build methods for the generic call operation.
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "StringRef callee, ArrayRef<Value *> arguments">
+ "StringRef callee, ArrayRef<ValuePtr> arguments">
];
}
@@ -181,7 +181,7 @@ def MulOp : Toy_Op<"mul",
// Allow building a MulOp from the two input operands.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs">
];
}
@@ -260,7 +260,7 @@ def StructAccessOp : Toy_Op<"struct_access", [NoSideEffect]> {
// Allow building a StructAccessOp with just a struct value and an index.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input, size_t index">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input, size_t index">
];
let verifier = [{ return ::verify(*this); }];
@@ -299,7 +299,7 @@ def TransposeOp : Toy_Op<"transpose",
// Allow building a TransposeOp from the input operand.
let builders = [
- OpBuilder<"Builder *b, OperationState &state, Value *input">
+ OpBuilder<"Builder *b, OperationState &state, ValuePtr input">
];
// Invoke a static verify method to verify this transpose operation.
diff --git a/mlir/examples/toy/Ch7/mlir/Dialect.cpp b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
index 2beaa870a89..0ce896db5de 100644
--- a/mlir/examples/toy/Ch7/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
@@ -56,7 +56,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator (toy.return) by replacing it with a new
/// operation as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "toy.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -71,7 +71,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// operation that takes 'input' as the only operand, and produces a single
/// result of 'resultType'. If a conversion cannot be generated, nullptr
/// should be returned.
- Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+ Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
Type resultType,
Location conversionLoc) const final {
return builder.create<CastOp>(conversionLoc, resultType, input);
@@ -195,7 +195,7 @@ void ConstantOp::inferShapes() { getResult()->setType(value().getType()); }
// AddOp
void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -215,7 +215,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
// GenericCallOp
void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state,
- StringRef callee, ArrayRef<mlir::Value *> arguments) {
+ StringRef callee,
+ ArrayRef<mlir::ValuePtr> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
@@ -236,7 +237,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
// MulOp
void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *lhs, mlir::Value *rhs) {
+ mlir::ValuePtr lhs, mlir::ValuePtr rhs) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@@ -287,7 +288,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
// StructAccessOp
void StructAccessOp::build(mlir::Builder *b, mlir::OperationState &state,
- mlir::Value *input, size_t index) {
+ mlir::ValuePtr input, size_t index) {
// Extract the result type from the input type.
StructType structTy = input->getType().cast<StructType>();
assert(index < structTy.getNumElementTypes());
@@ -314,7 +315,7 @@ static mlir::LogicalResult verify(StructAccessOp op) {
// TransposeOp
void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
- mlir::Value *value) {
+ mlir::ValuePtr value) {
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
index 4ab8c5b501c..3fa761c7404 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -43,8 +43,8 @@ static MemRefType convertTensorToMemRef(TensorType type) {
}
/// Insert an allocation and deallocation for the given MemRefType.
-static Value *insertAllocAndDealloc(MemRefType type, Location loc,
- PatternRewriter &rewriter) {
+static ValuePtr insertAllocAndDealloc(MemRefType type, Location loc,
+ PatternRewriter &rewriter) {
auto alloc = rewriter.create<AllocOp>(loc, type);
// Make sure to allocate at the beginning of the block.
@@ -63,11 +63,11 @@ static Value *insertAllocAndDealloc(MemRefType type, Location loc,
/// to the operands of the input operation, and the set of loop induction
/// variables for the iteration. It returns a value to store at the current
/// index of the iteration.
-using LoopIterationFn = function_ref<Value *(PatternRewriter &rewriter,
- ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs)>;
+using LoopIterationFn = function_ref<ValuePtr(PatternRewriter &rewriter,
+ ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs)>;
-static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
+static void lowerOpToLoops(Operation *op, ArrayRef<ValuePtr> operands,
PatternRewriter &rewriter,
LoopIterationFn processIteration) {
auto tensorType = (*op->result_type_begin()).cast<TensorType>();
@@ -78,7 +78,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter);
// Create an empty affine loop for each of the dimensions within the shape.
- SmallVector<Value *, 4> loopIvs;
+ SmallVector<ValuePtr, 4> loopIvs;
for (auto dim : tensorType.getShape()) {
auto loop = rewriter.create<AffineForOp>(loc, /*lb=*/0, dim, /*step=*/1);
loop.getBody()->clear();
@@ -94,7 +94,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef<Value *> operands,
// Generate a call to the processing function with the rewriter, the memref
// operands, and the loop induction variables. This function will return the
// value to store at the current index.
- Value *valueToStore = processIteration(rewriter, operands, loopIvs);
+ ValuePtr valueToStore = processIteration(rewriter, operands, loopIvs);
rewriter.create<AffineStoreOp>(loc, valueToStore, alloc,
llvm::makeArrayRef(loopIvs));
@@ -113,13 +113,13 @@ struct BinaryOpLowering : public ConversionPattern {
: ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
lowerOpToLoops(
op, operands, rewriter,
- [loc](PatternRewriter &rewriter, ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs) {
+ [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the BinaryOp. This
// allows for using the nice named accessors that are generated by the
// ODS.
@@ -163,7 +163,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// Create these constants up-front to avoid large amounts of redundant
// operations.
auto valueShape = memRefType.getShape();
- SmallVector<Value *, 8> constantIndices;
+ SmallVector<ValuePtr, 8> constantIndices;
for (auto i : llvm::seq<int64_t>(
0, *std::max_element(valueShape.begin(), valueShape.end())))
constantIndices.push_back(rewriter.create<ConstantIndexOp>(loc, i));
@@ -172,7 +172,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// will need to generate a store for each of the elements. The following
// functor recursively walks the dimensions of the constant shape,
// generating a store when the recursion hits the base case.
- SmallVector<Value *, 2> indices;
+ SmallVector<ValuePtr, 2> indices;
auto valueIt = constantValue.getValues<FloatAttr>().begin();
std::function<void(uint64_t)> storeElements = [&](uint64_t dimension) {
// The last dimension is the base case of the recursion, at this point
@@ -231,22 +231,22 @@ struct TransposeOpLowering : public ConversionPattern {
: ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
lowerOpToLoops(
op, operands, rewriter,
- [loc](PatternRewriter &rewriter, ArrayRef<Value *> memRefOperands,
- ArrayRef<Value *> loopIvs) {
+ [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
+ ArrayRef<ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the TransposeOp.
// This allows for using the nice named accessors that are generated
// by the ODS.
toy::TransposeOpOperandAdaptor transposeAdaptor(memRefOperands);
- Value *input = transposeAdaptor.input();
+ ValuePtr input = transposeAdaptor.input();
// Transpose the elements by generating a load from the reverse
// indices.
- SmallVector<Value *, 2> reverseIvs(llvm::reverse(loopIvs));
+ SmallVector<ValuePtr, 2> reverseIvs(llvm::reverse(loopIvs));
return rewriter.create<AffineLoadOp>(loc, input, reverseIvs);
});
return matchSuccess();
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
index d35cc5c576a..c3180b4a92d 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
@@ -51,7 +51,7 @@ public:
: ConversionPattern(toy::PrintOp::getOperationName(), 1, context) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto memRefType = (*op->operand_type_begin()).cast<MemRefType>();
auto memRefShape = memRefType.getShape();
@@ -64,14 +64,14 @@ public:
// Get a symbol reference to the printf function, inserting it if necessary.
auto printfRef = getOrInsertPrintf(rewriter, parentModule, llvmDialect);
- Value *formatSpecifierCst = getOrCreateGlobalString(
+ ValuePtr formatSpecifierCst = getOrCreateGlobalString(
loc, rewriter, "frmt_spec", StringRef("%f \0", 4), parentModule,
llvmDialect);
- Value *newLineCst = getOrCreateGlobalString(
+ ValuePtr newLineCst = getOrCreateGlobalString(
loc, rewriter, "nl", StringRef("\n\0", 2), parentModule, llvmDialect);
// Create a loop for each of the dimensions within the shape.
- SmallVector<Value *, 4> loopIvs;
+ SmallVector<ValuePtr, 4> loopIvs;
for (unsigned i = 0, e = memRefShape.size(); i != e; ++i) {
auto lowerBound = rewriter.create<ConstantIndexOp>(loc, 0);
auto upperBound = rewriter.create<ConstantIndexOp>(loc, memRefShape[i]);
@@ -97,7 +97,7 @@ public:
auto elementLoad = rewriter.create<LoadOp>(loc, printOp.input(), loopIvs);
rewriter.create<CallOp>(
loc, printfRef, rewriter.getIntegerType(32),
- ArrayRef<Value *>({formatSpecifierCst, elementLoad}));
+ ArrayRef<ValuePtr>({formatSpecifierCst, elementLoad}));
// Notify the rewriter that this operation has been removed.
rewriter.eraseOp(op);
@@ -130,10 +130,10 @@ private:
/// Return a value representing an access into a global string with the given
/// name, creating the string if necessary.
- static Value *getOrCreateGlobalString(Location loc, OpBuilder &builder,
- StringRef name, StringRef value,
- ModuleOp module,
- LLVM::LLVMDialect *llvmDialect) {
+ static ValuePtr getOrCreateGlobalString(Location loc, OpBuilder &builder,
+ StringRef name, StringRef value,
+ ModuleOp module,
+ LLVM::LLVMDialect *llvmDialect) {
// Create the global at the entry of the module.
LLVM::GlobalOp global;
if (!(global = module.lookupSymbol<LLVM::GlobalOp>(name))) {
@@ -147,13 +147,13 @@ private:
}
// Get the pointer to the first character in the global string.
- Value *globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- Value *cst0 = builder.create<LLVM::ConstantOp>(
+ ValuePtr globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
+ ValuePtr cst0 = builder.create<LLVM::ConstantOp>(
loc, LLVM::LLVMType::getInt64Ty(llvmDialect),
builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr,
- ArrayRef<Value *>({cst0, cst0}));
+ ArrayRef<ValuePtr>({cst0, cst0}));
}
};
} // end anonymous namespace
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
index b33137a1066..590b21e53a1 100644
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -108,11 +108,11 @@ private:
/// Entering a function creates a new scope, and the function arguments are
/// added to the mapping. When the processing of a function is terminated, the
/// scope is destroyed and the mappings created in this scope are dropped.
- llvm::ScopedHashTable<StringRef, std::pair<mlir::Value *, VarDeclExprAST *>>
+ llvm::ScopedHashTable<StringRef, std::pair<mlir::ValuePtr, VarDeclExprAST *>>
symbolTable;
using SymbolTableScopeT =
llvm::ScopedHashTableScope<StringRef,
- std::pair<mlir::Value *, VarDeclExprAST *>>;
+ std::pair<mlir::ValuePtr, VarDeclExprAST *>>;
/// A mapping for the functions that have been code generated to MLIR.
llvm::StringMap<mlir::FuncOp> functionMap;
@@ -129,7 +129,7 @@ private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
- mlir::LogicalResult declare(VarDeclExprAST &var, mlir::Value *value) {
+ mlir::LogicalResult declare(VarDeclExprAST &var, mlir::ValuePtr value) {
if (symbolTable.count(var.getName()))
return mlir::failure();
symbolTable.insert(var.getName(), {value, &var});
@@ -301,7 +301,7 @@ private:
}
/// Emit a binary operation
- mlir::Value *mlirGen(BinaryExprAST &binop) {
+ mlir::ValuePtr mlirGen(BinaryExprAST &binop) {
// First emit the operations for each side of the operation before emitting
// the operation itself. For example if the expression is `a + foo(a)`
// 1) First it will visit the LHS, which will return a reference to the
@@ -313,7 +313,7 @@ private:
// and the result value is returned. If an error occurs we get a nullptr
// and propagate.
//
- mlir::Value *lhs = mlirGen(*binop.getLHS());
+ mlir::ValuePtr lhs = mlirGen(*binop.getLHS());
if (!lhs)
return nullptr;
auto location = loc(binop.loc());
@@ -329,7 +329,7 @@ private:
}
// Otherwise, this is a normal binary op.
- mlir::Value *rhs = mlirGen(*binop.getRHS());
+ mlir::ValuePtr rhs = mlirGen(*binop.getRHS());
if (!rhs)
return nullptr;
@@ -349,8 +349,8 @@ private:
/// This is a reference to a variable in an expression. The variable is
/// expected to have been declared and so should have a value in the symbol
/// table, otherwise emit an error and return nullptr.
- mlir::Value *mlirGen(VariableExprAST &expr) {
- if (auto *variable = symbolTable.lookup(expr.getName()).first)
+ mlir::ValuePtr mlirGen(VariableExprAST &expr) {
+ if (auto variable = symbolTable.lookup(expr.getName()).first)
return variable;
emitError(loc(expr.loc()), "error: unknown variable '")
@@ -363,7 +363,7 @@ private:
auto location = loc(ret.loc());
// 'return' takes an optional expression, handle that case here.
- mlir::Value *expr = nullptr;
+ mlir::ValuePtr expr = nullptr;
if (ret.getExpr().hasValue()) {
if (!(expr = mlirGen(*ret.getExpr().getValue())))
return mlir::failure();
@@ -371,7 +371,7 @@ private:
// Otherwise, this return operation has zero operands.
builder.create<ReturnOp>(location, expr ? makeArrayRef(expr)
- : ArrayRef<mlir::Value *>());
+ : ArrayRef<mlir::ValuePtr>());
return mlir::success();
}
@@ -450,7 +450,7 @@ private:
}
/// Emit an array literal.
- mlir::Value *mlirGen(LiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(LiteralExprAST &lit) {
mlir::Type type = getType(lit.getDims());
mlir::DenseElementsAttr dataAttribute = getConstantAttr(lit);
@@ -462,7 +462,7 @@ private:
/// Emit a struct literal. It will be emitted as an array of
/// other literals in an Attribute attached to a `toy.struct_constant`
/// operation.
- mlir::Value *mlirGen(StructLiteralExprAST &lit) {
+ mlir::ValuePtr mlirGen(StructLiteralExprAST &lit) {
mlir::ArrayAttr dataAttr;
mlir::Type dataType;
std::tie(dataAttr, dataType) = getConstantAttr(lit);
@@ -493,14 +493,14 @@ private:
/// Emit a call expression. It emits specific operations for the `transpose`
/// builtin. Other identifiers are assumed to be user-defined functions.
- mlir::Value *mlirGen(CallExprAST &call) {
+ mlir::ValuePtr mlirGen(CallExprAST &call) {
llvm::StringRef callee = call.getCallee();
auto location = loc(call.loc());
// Codegen the operands first.
- SmallVector<mlir::Value *, 4> operands;
+ SmallVector<mlir::ValuePtr, 4> operands;
for (auto &expr : call.getArgs()) {
- auto *arg = mlirGen(*expr);
+ auto arg = mlirGen(*expr);
if (!arg)
return nullptr;
operands.push_back(arg);
@@ -534,7 +534,7 @@ private:
/// Emit a print expression. It emits specific operations for two builtins:
/// transpose(x) and print(x).
mlir::LogicalResult mlirGen(PrintExprAST &call) {
- auto *arg = mlirGen(*call.getArg());
+ auto arg = mlirGen(*call.getArg());
if (!arg)
return mlir::failure();
@@ -543,12 +543,12 @@ private:
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
- mlir::Value *mlirGen(NumberExprAST &num) {
+ mlir::ValuePtr mlirGen(NumberExprAST &num) {
return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
- mlir::Value *mlirGen(ExprAST &expr) {
+ mlir::ValuePtr mlirGen(ExprAST &expr) {
switch (expr.getKind()) {
case toy::ExprAST::Expr_BinOp:
return mlirGen(cast<BinaryExprAST>(expr));
@@ -574,7 +574,7 @@ private:
/// initializer and record the value in the symbol table before returning it.
/// Future expressions will be able to reference this variable through symbol
/// table lookup.
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) {
+ mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) {
auto init = vardecl.getInitVal();
if (!init) {
emitError(loc(vardecl.loc()),
@@ -582,7 +582,7 @@ private:
return nullptr;
}
- mlir::Value *value = mlirGen(*init);
+ mlir::ValuePtr value = mlirGen(*init);
if (!value)
return nullptr;
diff --git a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
index ebd4f5d1103..d18396c63bb 100644
--- a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
@@ -71,7 +71,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
matchAndRewrite(TransposeOp op,
mlir::PatternRewriter &rewriter) const override {
// Look through the input of the current transpose.
- mlir::Value *transposeInput = op.getOperand();
+ mlir::ValuePtr transposeInput = op.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
diff --git a/mlir/g3doc/DeclarativeRewrites.md b/mlir/g3doc/DeclarativeRewrites.md
index 5adcb320983..9fcd4341611 100644
--- a/mlir/g3doc/DeclarativeRewrites.md
+++ b/mlir/g3doc/DeclarativeRewrites.md
@@ -233,7 +233,7 @@ In the above, we are using `BOp`'s result for building `COp`.
Given that `COp` was specified with table-driven op definition, there will be
several `build()` methods generated for it. One of them has aggregated
parameters for result types, operands, and attributes in the signature: `void
-COp::build(..., ArrayRef<Type> resultTypes, Array<Value *> operands,
+COp::build(..., ArrayRef<Type> resultTypes, Array<ValuePtr> operands,
ArrayRef<NamedAttribute> attr)`. The pattern in the above calls this `build()`
method for constructing the `COp`.
@@ -266,7 +266,7 @@ For example, for the above `AOp`, a possible builder is:
```c++
void AOp::build(Builder *builder, OperationState &state,
- Value *input, Attribute attr) {
+ ValuePtr input, Attribute attr) {
state.addOperands({input});
state.addAttribute("a_attr", attr);
Type type = ...; // Deduce result type here
@@ -422,7 +422,7 @@ op; it can be also used to specify how to build an op entirely. An example:
If we have a C++ function for building an op:
```c++
-Operation *createMyOp(OpBuilder builder, Value *input, Attribute attr);
+Operation *createMyOp(OpBuilder builder, ValuePtr input, Attribute attr);
```
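As an aside (a sketch, not from the patch or the document): a native builder matching the `createMyOp` declaration above is ordinary C++ that assembles an `OperationState` and hands it to the builder. `my_dialect.my_op`, `a_attr`, and the result-type choice below are placeholders, mirroring the documentation's hypothetical `MyOp`.

```c++
// Hypothetical definition for the declaration above; the result type is
// simply copied from the input here, standing in for real deduction logic.
Operation *createMyOp(OpBuilder builder, ValuePtr input, Attribute attr) {
  OperationState state(builder.getUnknownLoc(), "my_dialect.my_op");
  state.addOperands({input});
  state.addAttribute("a_attr", attr);
  state.addTypes({input->getType()});
  return builder.createOperation(state);
}
```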
We can wrap it up and invoke it like:
diff --git a/mlir/g3doc/DialectConversion.md b/mlir/g3doc/DialectConversion.md
index b4e309daf1f..6771860366c 100644
--- a/mlir/g3doc/DialectConversion.md
+++ b/mlir/g3doc/DialectConversion.md
@@ -209,7 +209,7 @@ class TypeConverter {
/// the conversion has finished.
virtual Operation *materializeConversion(PatternRewriter &rewriter,
Type resultType,
- ArrayRef<Value *> inputs,
+ ArrayRef<ValuePtr> inputs,
Location loc);
};
```
@@ -232,7 +232,7 @@ struct MyConversionPattern : public ConversionPattern {
/// `operands` parameter, containing the remapped operands of the original
/// operation.
virtual PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const;
};
```
@@ -269,7 +269,7 @@ public:
/// Remap an input of the original signature to another `replacement`
/// value. This drops the original argument.
- void remapInput(unsigned origInputNo, Value *replacement);
+ void remapInput(unsigned origInputNo, ValuePtr replacement);
};
```
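To make the remapped-operands contract concrete, a minimal sketch of a pattern written against this interface (`my_dialect.my_op` and the single-operand `TargetOp` are placeholders, not real ops; not part of the patch):

```c++
// Sketch: a conversion pattern that consumes the remapped operands.
struct MyOpConversion : public ConversionPattern {
  MyOpConversion(MLIRContext *ctx)
      : ConversionPattern("my_dialect.my_op", /*benefit=*/1, ctx) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // 'operands' holds the already-converted values; op->getOperands() would
    // still return the original, unconverted ones.
    rewriter.replaceOpWithNewOp<TargetOp>(op, operands[0]);
    return matchSuccess();
  }
};
```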
diff --git a/mlir/g3doc/EDSC.md b/mlir/g3doc/EDSC.md
index afceac2dfc1..eaaeb6c7009 100644
--- a/mlir/g3doc/EDSC.md
+++ b/mlir/g3doc/EDSC.md
@@ -15,10 +15,10 @@ declarative builders are available within the lifetime of a `ScopedContext`.
## ValueHandle and IndexHandle
`mlir::edsc::ValueHandle` and `mlir::edsc::IndexHandle` provide typed
-abstractions around an `mlir::Value*`. These abstractions are "delayed", in the
-sense that they allow separating declaration from definition. They may
-capture IR snippets, as they are built, for programmatic manipulation.
-Intuitive operators are provided to allow concise and idiomatic expressions.
+abstractions around an `mlir::Value`. These abstractions are "delayed", in the
+sense that they allow separating declaration from definition. They may capture
+IR snippets, as they are built, for programmatic manipulation. Intuitive
+operators are provided to allow concise and idiomatic expressions.
```c++
ValueHandle zero = constant_index(0);
diff --git a/mlir/g3doc/GenericDAGRewriter.md b/mlir/g3doc/GenericDAGRewriter.md
index 3b26c22eb37..64b8f4f7ade 100644
--- a/mlir/g3doc/GenericDAGRewriter.md
+++ b/mlir/g3doc/GenericDAGRewriter.md
@@ -128,7 +128,7 @@ complicated :)
if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
if (C1->countTrailingZeros() == 0)
if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
- Value *NewOr = Builder.CreateOr(Z, ~(*C2));
+ ValuePtr NewOr = Builder.CreateOr(Z, ~(*C2));
return Builder.CreateSub(RHS, NewOr, "sub");
}
```
diff --git a/mlir/g3doc/OpDefinitions.md b/mlir/g3doc/OpDefinitions.md
index 1f98671d59a..1db18266ee0 100644
--- a/mlir/g3doc/OpDefinitions.md
+++ b/mlir/g3doc/OpDefinitions.md
@@ -360,7 +360,7 @@ def MyInterface : OpInterface<"MyInterface"> {
// A new non-static method accepting an input argument.
InterfaceMethod<"/*insert doc here*/",
- "Value *", "bar", (ins "unsigned":$i)
+ "ValuePtr ", "bar", (ins "unsigned":$i)
>,
// Query a static property of the derived operation.
@@ -438,7 +438,7 @@ static void build(Builder *tblgen_builder, OperationState &tblgen_state,
// for attributes are of mlir::Attribute types.
static void build(Builder *tblgen_builder, OperationState &tblgen_state,
Type i32_result, Type f32_result, ...,
- Value *i32_operand, Value *f32_operand, ...,
+ ValuePtr i32_operand, ValuePtr f32_operand, ...,
IntegerAttr i32_attr, FloatAttr f32_attr, ...);
// Each result-type/operand/attribute has a separate parameter. The parameters
@@ -447,13 +447,13 @@ static void build(Builder *tblgen_builder, OperationState &tblgen_state,
// explanation for more details.)
static void build(Builder *tblgen_builder, OperationState &tblgen_state,
Type i32_result, Type f32_result, ...,
- Value *i32_operand, Value *f32_operand, ...,
+ ValuePtr i32_operand, ValuePtr f32_operand, ...,
APInt i32_attr, StringRef f32_attr, ...);
// Each operand/attribute has a separate parameter but result type is aggregate.
static void build(Builder *tblgen_builder, OperationState &tblgen_state,
ArrayRef<Type> resultTypes,
- Value *i32_operand, Value *f32_operand, ...,
+ ValuePtr i32_operand, ValuePtr f32_operand, ...,
IntegerAttr i32_attr, FloatAttr f32_attr, ...);
// All operands/attributes have aggregate parameters.
@@ -615,7 +615,7 @@ coding style requirements.
For each operation, we automatically generate an _operand adaptor_. This class
solves the problem of accessing operands provided as a list of `Value`s without
using "magic" constants. The operand adaptor takes a reference to an array of
-`Value *` and provides methods with the same names as those in the operation
+`ValuePtr` and provides methods with the same names as those in the operation
class to access them. For example, for a binary arithmetic operation, it may
provide `.lhs()` to access the first operand and `.rhs()` to access the second
operand.
@@ -629,11 +629,11 @@ Operand adaptors can be used in function templates that also process operations:
```c++
template <typename BinaryOpTy>
-std::pair<Value *, Value *> zip(BinaryOpTy &&op) {
+std::pair<ValuePtr, ValuePtr> zip(BinaryOpTy &&op) {
return std::make_pair(op.lhs(), op.rhs());
}
-void process(AddOp op, ArrayRef<Value *> newOperands) {
+void process(AddOp op, ArrayRef<ValuePtr> newOperands) {
zip(op);
zip(OperandAdaptor<AddOp>(newOperands));
/*...*/
diff --git a/mlir/g3doc/QuickstartRewrites.md b/mlir/g3doc/QuickstartRewrites.md
index d7bf9a54370..6a4a7cca8b8 100644
--- a/mlir/g3doc/QuickstartRewrites.md
+++ b/mlir/g3doc/QuickstartRewrites.md
@@ -128,8 +128,8 @@ def : Pat<(TF_LeakyReluOp:$old_value, $arg, F32Attr:$a),
```
```c++
-static Value* createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op,
- Value* operand, Attribute attr) {
+static Value createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op,
+ Value operand, Attribute attr) {
return rewriter.create<mlir::TFL::LeakyReluOp>(
op->getLoc(), operands[0]->getType(), /*arg=*/operands[0],
/*alpha=*/attrs[0].cast<FloatAttr>());
diff --git a/mlir/g3doc/Rationale.md b/mlir/g3doc/Rationale.md
index 66cf800621d..763442dce06 100644
--- a/mlir/g3doc/Rationale.md
+++ b/mlir/g3doc/Rationale.md
@@ -1099,7 +1099,7 @@ those chunks independently.
The problem is that LLVM has several objects in its IR that are globally uniqued
and also mutable: notably constants like `i32 0`. In LLVM, these constants are
-`Value*r`'s, which allow them to be used as operands to instructions, and that
+`Value`'s, which allow them to be used as operands to instructions, and that
they also have SSA use lists. Because these things are uniqued, every `i32 0` in
any function shares a use list. This means that optimizing multiple functions in
parallel won't work (at least without some sort of synchronization on the use
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-3.md b/mlir/g3doc/Tutorials/Toy/Ch-3.md
index 07ead64d455..fb470434d6f 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-3.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-3.md
@@ -90,7 +90,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
matchAndRewrite(TransposeOp op,
mlir::PatternRewriter &rewriter) const override {
// Look through the input of the current transpose.
- mlir::Value *transposeInput = op.getOperand();
+ mlir::ValuePtr transposeInput = op.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
// If the input is defined by another Transpose, bingo!
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-4.md b/mlir/g3doc/Tutorials/Toy/Ch-4.md
index ac124699c2f..921e5cdc52a 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-4.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-4.md
@@ -75,7 +75,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// previously returned by the call operation with the operands of the
/// return.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "toy.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -207,7 +207,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
/// operation that takes 'input' as the only operand, and produces a single
/// result of 'resultType'. If a conversion cannot be generated, nullptr
/// should be returned.
- Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+ Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
Type resultType,
Location conversionLoc) const final {
return builder.create<CastOp>(conversionLoc, resultType, input);
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-5.md b/mlir/g3doc/Tutorials/Toy/Ch-5.md
index 1124cf14a43..ed62f8954b7 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-5.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-5.md
@@ -101,7 +101,7 @@ struct TransposeOpLowering : public mlir::ConversionPattern {
/// Match and rewrite the given `toy.transpose` operation, with the given
/// operands that have been remapped from `tensor<...>` to `memref<...>`.
mlir::PatternMatchResult
- matchAndRewrite(mlir::Operation *op, ArrayRef<mlir::Value *> operands,
+ matchAndRewrite(mlir::Operation *op, ArrayRef<mlir::ValuePtr> operands,
mlir::ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
@@ -112,18 +112,18 @@ struct TransposeOpLowering : public mlir::ConversionPattern {
lowerOpToLoops(
op, operands, rewriter,
[loc](mlir::PatternRewriter &rewriter,
- ArrayRef<mlir::Value *> memRefOperands,
- ArrayRef<mlir::Value *> loopIvs) {
+ ArrayRef<mlir::ValuePtr> memRefOperands,
+ ArrayRef<mlir::ValuePtr> loopIvs) {
// Generate an adaptor for the remapped operands of the TransposeOp.
// This allows for using the nice named accessors that are generated
// by the ODS. This adaptor is automatically provided by the ODS
// framework.
TransposeOpOperandAdaptor transposeAdaptor(memRefOperands);
- mlir::Value *input = transposeAdaptor.input();
+ mlir::ValuePtr input = transposeAdaptor.input();
// Transpose the elements by generating a load from the reverse
// indices.
- SmallVector<mlir::Value *, 2> reverseIvs(llvm::reverse(loopIvs));
+ SmallVector<mlir::ValuePtr, 2> reverseIvs(llvm::reverse(loopIvs));
return rewriter.create<mlir::AffineLoadOp>(loc, input, reverseIvs);
});
return matchSuccess();
diff --git a/mlir/g3doc/UsageOfConst.md b/mlir/g3doc/UsageOfConst.md
index 052f14ddf01..5f6d3793164 100644
--- a/mlir/g3doc/UsageOfConst.md
+++ b/mlir/g3doc/UsageOfConst.md
@@ -10,7 +10,7 @@ understood (even though the LLVM implementation is flawed in many ways).
The design team since decided to change to a different module, which eschews
`const` entirely for the core IR types: you should never see a `const` method on
-`Operation`, should never see the type `const Value *`, and you shouldn't feel
+`Operation`, should never see the type `const ValuePtr`, and you shouldn't feel
bad about this. That said, you *should* use `const` for non-IR types, like
`SmallVector`'s and many other things.
@@ -39,7 +39,7 @@ into the MLIR codebase, argues that the cost/benefit tradeoff of this design is
a poor tradeoff, and proposes switching to a much simpler approach - eliminating
the use of const of these IR types entirely.
-**Note:** **This document is only discussing things like `const Value*` and
+**Note:** **This document is only discussing things like `const Value` and
`const Operation*`. There is no proposed change for other types, e.g.
`SmallVector` references, the immutable types like `Attribute`, etc.**
@@ -130,7 +130,7 @@ const.
operand_iterator operand_begin();
operand_iterator operand_end();
- /// Returns an iterator on the underlying Value's (Value *).
+ /// Returns an iterator on the underlying Value's (ValuePtr ).
operand_range getOperands();
// Support const operand iteration.
@@ -141,7 +141,7 @@ const.
const_operand_iterator operand_begin() const;
const_operand_iterator operand_end() const;
- /// Returns a const iterator on the underlying Value's (Value *).
+ /// Returns a const iterator on the underlying Value's (ValuePtr ).
llvm::iterator_range<const_operand_iterator> getOperands() const;
ArrayRef<OpOperand> getOpOperands() const {
diff --git a/mlir/include/mlir/Analysis/AffineAnalysis.h b/mlir/include/mlir/Analysis/AffineAnalysis.h
index 8243d1f6f63..f506470f36a 100644
--- a/mlir/include/mlir/Analysis/AffineAnalysis.h
+++ b/mlir/include/mlir/Analysis/AffineAnalysis.h
@@ -39,10 +39,13 @@ class FlatAffineConstraints;
class Operation;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Returns in `affineApplyOps`, the sequence of those AffineApplyOp
/// Operations that are reachable via a search starting from `operands` and
/// ending at those operands that are not the result of an AffineApplyOp.
-void getReachableAffineApplyOps(ArrayRef<Value *> operands,
+void getReachableAffineApplyOps(ArrayRef<ValuePtr> operands,
SmallVectorImpl<Operation *> &affineApplyOps);
/// Builds a system of constraints with dimensional identifiers corresponding to
@@ -56,9 +59,9 @@ LogicalResult getIndexSet(MutableArrayRef<AffineForOp> forOps,
/// Encapsulates a memref load or store access information.
struct MemRefAccess {
- Value *memref;
+ ValuePtr memref;
Operation *opInst;
- SmallVector<Value *, 4> indices;
+ SmallVector<ValuePtr, 4> indices;
/// Constructs a MemRefAccess from a load or store operation.
// TODO(b/119949820): add accessors to standard op's load, store, DMA op's to
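This alias, repeated in each header the patch touches, is what makes the rename mechanical. A sketch of the intent; the second alias is a possible future state, not something this commit does:

```c++
namespace mlir {
class Value;

// Today (this patch): ValuePtr is a plain pointer, so switching call sites
// from `Value *` to `ValuePtr` is a pure spelling change.
using ValuePtr = Value *;

// Later, once Value becomes value-typed, the alias could be flipped to
//   using ValuePtr = Value;
// and code already written against ValuePtr needs no further textual churn.
} // namespace mlir
```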
diff --git a/mlir/include/mlir/Analysis/AffineStructures.h b/mlir/include/mlir/Analysis/AffineStructures.h
index e53af5024da..65cf13a0ce6 100644
--- a/mlir/include/mlir/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Analysis/AffineStructures.h
@@ -123,8 +123,8 @@ public:
// Creates an empty AffineValueMap (users should call 'reset' to reset map
// and operands).
AffineValueMap() {}
- AffineValueMap(AffineMap map, ArrayRef<Value *> operands,
- ArrayRef<Value *> results = llvm::None);
+ AffineValueMap(AffineMap map, ArrayRef<ValuePtr> operands,
+ ArrayRef<ValuePtr> results = llvm::None);
explicit AffineValueMap(AffineApplyOp applyOp);
explicit AffineValueMap(AffineBound bound);
@@ -132,8 +132,8 @@ public:
~AffineValueMap();
// Resets this AffineValueMap with 'map', 'operands', and 'results'.
- void reset(AffineMap map, ArrayRef<Value *> operands,
- ArrayRef<Value *> results = llvm::None);
+ void reset(AffineMap map, ArrayRef<ValuePtr> operands,
+ ArrayRef<ValuePtr> results = llvm::None);
/// Return the value map that is the difference of value maps 'a' and 'b',
/// represented as an affine map and its operands. The output map + operands
@@ -146,7 +146,7 @@ public:
inline bool isMultipleOf(unsigned idx, int64_t factor) const;
/// Return true if the idx^th result depends on 'value', false otherwise.
- bool isFunctionOf(unsigned idx, Value *value) const;
+ bool isFunctionOf(unsigned idx, ValuePtr value) const;
/// Return true if the result at 'idx' is a constant, false
/// otherwise.
@@ -162,8 +162,8 @@ public:
inline unsigned getNumSymbols() const { return map.getNumSymbols(); }
inline unsigned getNumResults() const { return map.getNumResults(); }
- Value *getOperand(unsigned i) const;
- ArrayRef<Value *> getOperands() const;
+ ValuePtr getOperand(unsigned i) const;
+ ArrayRef<ValuePtr> getOperands() const;
AffineMap getAffineMap() const;
private:
@@ -172,9 +172,9 @@ private:
// TODO: make these trailing objects?
/// The SSA operands binding to the dim's and symbols of 'map'.
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
/// The SSA results binding to the results of 'map'.
- SmallVector<Value *, 4> results;
+ SmallVector<ValuePtr, 4> results;
};
/// An IntegerValueSet is an integer set plus its operands.
@@ -207,7 +207,7 @@ private:
// 'AffineCondition'.
MutableIntegerSet set;
/// The SSA operands binding to the dim's and symbols of 'set'.
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
};
/// A flat list of affine equalities and inequalities in the form.
@@ -245,7 +245,7 @@ public:
unsigned numReservedEqualities,
unsigned numReservedCols, unsigned numDims = 0,
unsigned numSymbols = 0, unsigned numLocals = 0,
- ArrayRef<Optional<Value *>> idArgs = {})
+ ArrayRef<Optional<ValuePtr>> idArgs = {})
: numReservedCols(numReservedCols), numDims(numDims),
numSymbols(numSymbols) {
assert(numReservedCols >= numDims + numSymbols + 1);
@@ -264,7 +264,7 @@ public:
/// dimensions and symbols.
FlatAffineConstraints(unsigned numDims = 0, unsigned numSymbols = 0,
unsigned numLocals = 0,
- ArrayRef<Optional<Value *>> idArgs = {})
+ ArrayRef<Optional<ValuePtr>> idArgs = {})
: numReservedCols(numDims + numSymbols + numLocals + 1), numDims(numDims),
numSymbols(numSymbols) {
assert(numReservedCols >= numDims + numSymbols + 1);
@@ -304,10 +304,10 @@ public:
// Clears any existing data and reserves memory for the specified constraints.
void reset(unsigned numReservedInequalities, unsigned numReservedEqualities,
unsigned numReservedCols, unsigned numDims, unsigned numSymbols,
- unsigned numLocals = 0, ArrayRef<Value *> idArgs = {});
+ unsigned numLocals = 0, ArrayRef<ValuePtr> idArgs = {});
void reset(unsigned numDims = 0, unsigned numSymbols = 0,
- unsigned numLocals = 0, ArrayRef<Value *> idArgs = {});
+ unsigned numLocals = 0, ArrayRef<ValuePtr> idArgs = {});
/// Appends constraints from 'other' into this. This is equivalent to an
/// intersection with no simplification of any sort attempted.
@@ -396,7 +396,7 @@ public:
/// operands. If `eq` is true, add a single equality equal to the bound map's
/// first result expr.
LogicalResult addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
- ArrayRef<Value *> operands, bool eq,
+ ArrayRef<ValuePtr> operands, bool eq,
bool lower = true);
/// Computes the lower and upper bounds of the first 'num' dimensional
@@ -415,10 +415,10 @@ public:
/// operand list 'operands'.
/// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size'.
/// Note that both lower/upper bounds use operands from 'operands'.
- LogicalResult addSliceBounds(ArrayRef<Value *> values,
+ LogicalResult addSliceBounds(ArrayRef<ValuePtr> values,
ArrayRef<AffineMap> lbMaps,
ArrayRef<AffineMap> ubMaps,
- ArrayRef<Value *> operands);
+ ArrayRef<ValuePtr> operands);
// Adds an inequality (>= 0) from the coefficients specified in inEq.
void addInequality(ArrayRef<int64_t> inEq);
@@ -447,25 +447,25 @@ public:
/// Sets the identifier corresponding to the specified Value id to a
/// constant. Asserts if the 'id' is not found.
- void setIdToConstant(Value &id, int64_t val);
+ void setIdToConstant(ValueRef id, int64_t val);
/// Looks up the position of the identifier with the specified Value. Returns
/// true if found (false otherwise). `pos' is set to the (column) position of
/// the identifier.
- bool findId(Value &id, unsigned *pos) const;
+ bool findId(ValueRef id, unsigned *pos) const;
/// Returns true if an identifier with the specified Value exists, false
/// otherwise.
- bool containsId(Value &id) const;
+ bool containsId(ValueRef id) const;
// Add identifiers of the specified kind - specified positions are relative to
// the kind of identifier. The coefficient column corresponding to the added
// identifier is initialized to zero. 'id' is the Value corresponding to the
// identifier that can optionally be provided.
- void addDimId(unsigned pos, Value *id = nullptr);
- void addSymbolId(unsigned pos, Value *id = nullptr);
+ void addDimId(unsigned pos, ValuePtr id = nullptr);
+ void addSymbolId(unsigned pos, ValuePtr id = nullptr);
void addLocalId(unsigned pos);
- void addId(IdKind kind, unsigned pos, Value *id = nullptr);
+ void addId(IdKind kind, unsigned pos, ValuePtr id = nullptr);
/// Add the specified values as a dim or symbol id depending on its nature, if
/// it already doesn't exist in the system. `id' has to be either a terminal
@@ -473,7 +473,7 @@ public:
/// symbols or loop IVs. The identifier is added to the end of the existing
/// dims or symbols. Additional information on the identifier is extracted
/// from the IR and added to the constraint system.
- void addInductionVarOrTerminalSymbol(Value *id);
+ void addInductionVarOrTerminalSymbol(ValuePtr id);
/// Composes the affine value map with this FlatAffineConstrains, adding the
/// results of the map as dimensions at the front [0, vMap->getNumResults())
@@ -500,8 +500,8 @@ public:
void projectOut(unsigned pos, unsigned num);
inline void projectOut(unsigned pos) { return projectOut(pos, 1); }
- /// Projects out the identifier that is associate with Value *.
- void projectOut(Value *id);
+ /// Projects out the identifier that is associated with ValuePtr.
+ void projectOut(ValuePtr id);
void removeId(IdKind idKind, unsigned pos);
void removeId(unsigned pos);
@@ -577,20 +577,20 @@ public:
return numIds - numDims - numSymbols;
}
- inline ArrayRef<Optional<Value *>> getIds() const {
+ inline ArrayRef<Optional<ValuePtr>> getIds() const {
return {ids.data(), ids.size()};
}
- inline MutableArrayRef<Optional<Value *>> getIds() {
+ inline MutableArrayRef<Optional<ValuePtr>> getIds() {
return {ids.data(), ids.size()};
}
/// Returns the optional Value corresponding to the pos^th identifier.
- inline Optional<Value *> getId(unsigned pos) const { return ids[pos]; }
- inline Optional<Value *> &getId(unsigned pos) { return ids[pos]; }
+ inline Optional<ValuePtr> getId(unsigned pos) const { return ids[pos]; }
+ inline Optional<ValuePtr> &getId(unsigned pos) { return ids[pos]; }
/// Returns the Value associated with the pos^th identifier. Asserts if
/// no Value identifier was associated.
- inline Value *getIdValue(unsigned pos) const {
+ inline ValuePtr getIdValue(unsigned pos) const {
assert(ids[pos].hasValue() && "identifier's Value not set");
return ids[pos].getValue();
}
@@ -598,7 +598,7 @@ public:
/// Returns the Values associated with identifiers in range [start, end).
/// Asserts if no Value was associated with one of these identifiers.
void getIdValues(unsigned start, unsigned end,
- SmallVectorImpl<Value *> *values) const {
+ SmallVectorImpl<ValuePtr> *values) const {
assert((start < numIds || start == end) && "invalid start position");
assert(end <= numIds && "invalid end position");
values->clear();
@@ -607,17 +607,17 @@ public:
values->push_back(getIdValue(i));
}
}
- inline void getAllIdValues(SmallVectorImpl<Value *> *values) const {
+ inline void getAllIdValues(SmallVectorImpl<ValuePtr> *values) const {
getIdValues(0, numIds, values);
}
/// Sets Value associated with the pos^th identifier.
- inline void setIdValue(unsigned pos, Value *val) {
+ inline void setIdValue(unsigned pos, ValuePtr val) {
assert(pos < numIds && "invalid id position");
ids[pos] = val;
}
/// Sets Values associated with identifiers in the range [start, end).
- void setIdValues(unsigned start, unsigned end, ArrayRef<Value *> values) {
+ void setIdValues(unsigned start, unsigned end, ArrayRef<ValuePtr> values) {
assert((start < numIds || end == start) && "invalid start position");
assert(end <= numIds && "invalid end position");
assert(values.size() == end - start);
@@ -766,7 +766,7 @@ private:
/// system appearing in the order the identifiers correspond to columns.
/// Temporary ones or those that aren't associated to any Value are set to
/// None.
- SmallVector<Optional<Value *>, 8> ids;
+ SmallVector<Optional<ValuePtr>, 8> ids;
/// A parameter that controls detection of an unrealistic number of
/// constraints. If the number of constraints is this many times the number of
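A rough usage sketch of the ValuePtr/ValueRef spellings above, illustrative only, with `iv` standing in for some loop induction variable:

    FlatAffineConstraints cst(/*numDims=*/1);
    cst.setIdValue(/*pos=*/0, iv);          // iv : ValuePtr
    ValuePtr dimId = cst.getIdValue(0);     // round-trips the same ValuePtr
    unsigned pos;
    if (cst.findId(*iv, &pos))              // findId takes a ValueRef (Value &)
      cst.setIdToConstant(*iv, /*val=*/0);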
diff --git a/mlir/include/mlir/Analysis/CallInterfaces.h b/mlir/include/mlir/Analysis/CallInterfaces.h
index dd23d77889f..a18cfa7aba4 100644
--- a/mlir/include/mlir/Analysis/CallInterfaces.h
+++ b/mlir/include/mlir/Analysis/CallInterfaces.h
@@ -30,8 +30,8 @@ namespace mlir {
/// A callable is either a symbol, or an SSA value, that is referenced by a
/// call-like operation. This represents the destination of the call.
-struct CallInterfaceCallable : public PointerUnion<SymbolRefAttr, Value *> {
- using PointerUnion<SymbolRefAttr, Value *>::PointerUnion;
+struct CallInterfaceCallable : public PointerUnion<SymbolRefAttr, ValuePtr> {
+ using PointerUnion<SymbolRefAttr, ValuePtr>::PointerUnion;
};
#include "mlir/Analysis/CallInterfaces.h.inc"
diff --git a/mlir/include/mlir/Analysis/Dominance.h b/mlir/include/mlir/Analysis/Dominance.h
index 09114eafbb1..f46241e2af0 100644
--- a/mlir/include/mlir/Analysis/Dominance.h
+++ b/mlir/include/mlir/Analysis/Dominance.h
@@ -74,10 +74,10 @@ public:
}
/// Return true if value A properly dominates operation B.
- bool properlyDominates(Value *a, Operation *b);
+ bool properlyDominates(ValuePtr a, Operation *b);
/// Return true if operation A dominates operation B.
- bool dominates(Value *a, Operation *b) {
+ bool dominates(ValuePtr a, Operation *b) {
return (Operation *)a->getDefiningOp() == b || properlyDominates(a, b);
}
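The dominance queries change only in spelling; a sketch, assuming `func` is some enclosing FuncOp and `def`/`user` are a hypothetical value and operation inside it:

    DominanceInfo domInfo(func.getOperation());
    if (domInfo.properlyDominates(def, user))   // def : ValuePtr, user : Operation *
      llvm::errs() << "value dominates its use\n";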
diff --git a/mlir/include/mlir/Analysis/Liveness.h b/mlir/include/mlir/Analysis/Liveness.h
index 0bdb474fd92..0aa9d9693e4 100644
--- a/mlir/include/mlir/Analysis/Liveness.h
+++ b/mlir/include/mlir/Analysis/Liveness.h
@@ -41,6 +41,9 @@ class Operation;
class Region;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Represents an analysis for computing liveness information from a
/// given top-level operation. The analysis iterates over all associated
/// regions that are attached to the given top-level operation. It
@@ -57,7 +60,7 @@ class Liveness {
public:
using OperationListT = std::vector<Operation *>;
using BlockMapT = DenseMap<Block *, LivenessBlockInfo>;
- using ValueSetT = SmallPtrSet<Value *, 16>;
+ using ValueSetT = SmallPtrSet<ValuePtr, 16>;
public:
/// Creates a new Liveness analysis that computes liveness
@@ -72,7 +75,7 @@ public:
/// Note that the operations in this list are not ordered and the current
/// implementation is computationally expensive (as it iterates over all
/// blocks in which the given value is live).
- OperationListT resolveLiveness(Value *value) const;
+ OperationListT resolveLiveness(ValuePtr value) const;
/// Gets liveness info (if any) for the block.
const LivenessBlockInfo *getLiveness(Block *block) const;
@@ -85,7 +88,7 @@ public:
/// Returns true if the given operation represent the last use of the
/// given value.
- bool isLastUse(Value *value, Operation *operation) const;
+ bool isLastUse(ValuePtr value, Operation *operation) const;
/// Dumps the liveness information in a human readable format.
void dump() const;
@@ -124,20 +127,20 @@ public:
const ValueSetT &out() const { return outValues; }
/// Returns true if the given value is in the live-in set.
- bool isLiveIn(Value *value) const;
+ bool isLiveIn(ValuePtr value) const;
/// Returns true if the given value is in the live-out set.
- bool isLiveOut(Value *value) const;
+ bool isLiveOut(ValuePtr value) const;
/// Gets the start operation for the given value. This is the first operation
/// the given value is considered to be live. This could either be the start
/// operation of the current block (in case the value is live-in) or the
/// operation that defines the given value (must be referenced in this block).
- Operation *getStartOperation(Value *value) const;
+ Operation *getStartOperation(ValuePtr value) const;
/// Gets the end operation for the given value using the start operation
/// provided (must be referenced in this block).
- Operation *getEndOperation(Value *value, Operation *startOperation) const;
+ Operation *getEndOperation(ValuePtr value, Operation *startOperation) const;
private:
/// The underlying block.
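A liveness query against the updated signatures, with `func`, `value`, and `op` as hypothetical placeholders:

    Liveness liveness(func.getOperation());
    Liveness::OperationListT liveAcross = liveness.resolveLiveness(value);
    const LivenessBlockInfo *info = liveness.getLiveness(op->getBlock());
    bool escapesBlock = info && info->isLiveOut(value);
    bool dying = liveness.isLastUse(value, op);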
diff --git a/mlir/include/mlir/Analysis/LoopAnalysis.h b/mlir/include/mlir/Analysis/LoopAnalysis.h
index 47cc22a4923..ad7dc6d6092 100644
--- a/mlir/include/mlir/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Analysis/LoopAnalysis.h
@@ -36,6 +36,9 @@ class NestedPattern;
class Operation;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Returns the trip count of the loop as an affine map with its corresponding
/// operands if the latter is expressible as an affine expression, and nullptr
/// otherwise. This method always succeeds as long as the lower bound is not a
@@ -45,7 +48,7 @@ class Value;
// TODO(mlir-team): this should be moved into 'Transforms/' and be replaced by a
// pure analysis method relying on FlatAffineConstraints
void buildTripCountMapAndOperands(AffineForOp forOp, AffineMap *map,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
/// Returns the trip count of the loop if it's a constant, None otherwise. This
/// uses affine expression analysis and is able to determine constant trip count
@@ -66,8 +69,8 @@ uint64_t getLargestDivisorOfTripCount(AffineForOp forOp);
///
/// Emits a note if it encounters a chain of affine.apply and conservatively
/// handles those cases.
-DenseSet<Value *, DenseMapInfo<Value *>>
-getInvariantAccesses(Value *iv, ArrayRef<Value *> indices);
+DenseSet<ValuePtr, DenseMapInfo<ValuePtr>>
+getInvariantAccesses(ValuePtr iv, ArrayRef<ValuePtr> indices);
using VectorizableLoopFun = std::function<bool(AffineForOp)>;
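For example, invariance of access indices with respect to a loop IV is now queried over `ValuePtr`s; `forOp`, `idx0`, and `idx1` below are hypothetical:

    ValuePtr iv = forOp.getInductionVar();
    SmallVector<ValuePtr, 4> indices = {idx0, idx1};
    auto invariant = getInvariantAccesses(iv, indices);
    bool independentOfIv = invariant.size() == indices.size();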
diff --git a/mlir/include/mlir/Analysis/Utils.h b/mlir/include/mlir/Analysis/Utils.h
index cffa222154f..ea0987df3fe 100644
--- a/mlir/include/mlir/Analysis/Utils.h
+++ b/mlir/include/mlir/Analysis/Utils.h
@@ -55,7 +55,7 @@ unsigned getNestingDepth(Operation &op);
/// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
/// at 'forOp'.
void getSequentialLoops(AffineForOp forOp,
- llvm::SmallDenseSet<Value *, 8> *sequentialLoops);
+ llvm::SmallDenseSet<ValuePtr, 8> *sequentialLoops);
/// ComputationSliceState aggregates loop IVs, loop bound AffineMaps and their
/// associated operands for a set of loops within a loop nest (typically the
@@ -64,15 +64,15 @@ void getSequentialLoops(AffineForOp forOp,
struct ComputationSliceState {
// List of sliced loop IVs (ordered from outermost to innermost).
// EX: 'ivs[i]' has lower bound 'lbs[i]' and upper bound 'ubs[i]'.
- SmallVector<Value *, 4> ivs;
+ SmallVector<ValuePtr, 4> ivs;
// List of lower bound AffineMaps.
SmallVector<AffineMap, 4> lbs;
// List of upper bound AffineMaps.
SmallVector<AffineMap, 4> ubs;
// List of lower bound operands (lbOperands[i] are used by 'lbs[i]').
- std::vector<SmallVector<Value *, 4>> lbOperands;
+ std::vector<SmallVector<ValuePtr, 4>> lbOperands;
// List of upper bound operands (ubOperands[i] are used by 'ubs[i]').
- std::vector<SmallVector<Value *, 4>> ubOperands;
+ std::vector<SmallVector<ValuePtr, 4>> ubOperands;
// Slice loop nest insertion point in target loop nest.
Block::iterator insertPoint;
// Adds to 'cst' with constraints which represent the slice bounds on 'ivs'
@@ -257,7 +257,7 @@ struct MemRefRegion {
unsigned getRank() const;
/// Memref that this region corresponds to.
- Value *memref;
+ ValuePtr memref;
/// Read or write.
bool write;
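`getSequentialLoops` now fills a set keyed on the loops' induction-variable `ValuePtr`s; roughly, with a hypothetical `rootForOp`:

    llvm::SmallDenseSet<ValuePtr, 8> sequentialIVs;
    getSequentialLoops(rootForOp, &sequentialIVs);
    bool rootIsParallel = !sequentialIVs.count(rootForOp.getInductionVar());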
diff --git a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
index b5c51ad4b4c..4bbe6610e31 100644
--- a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
+++ b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
@@ -30,14 +30,17 @@ class OpBuilder;
class RewritePattern;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
// Owning list of rewriting patterns.
class OwningRewritePatternList;
/// Emit code that computes the given affine expression using standard
/// arithmetic operations applied to the provided dimension and symbol values.
-Value *expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr,
- ArrayRef<Value *> dimValues,
- ArrayRef<Value *> symbolValues);
+ValuePtr expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr,
+ ArrayRef<ValuePtr> dimValues,
+ ArrayRef<ValuePtr> symbolValues);
/// Collect a set of patterns to convert from the Affine dialect to the Standard
/// dialect, in particular convert structured affine control flow into CFG
@@ -47,11 +50,11 @@ void populateAffineToStdConversionPatterns(OwningRewritePatternList &patterns,
/// Emit code that computes the lower bound of the given affine loop using
/// standard arithmetic operations.
-Value *lowerAffineLowerBound(AffineForOp op, OpBuilder &builder);
+ValuePtr lowerAffineLowerBound(AffineForOp op, OpBuilder &builder);
/// Emit code that computes the upper bound of the given affine loop using
/// standard arithmetic operations.
-Value *lowerAffineUpperBound(AffineForOp op, OpBuilder &builder);
+ValuePtr lowerAffineUpperBound(AffineForOp op, OpBuilder &builder);
} // namespace mlir
#endif // MLIR_CONVERSION_AFFINETOSTANDARD_AFFINETOSTANDARD_H
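A sketch of `expandAffineExpr` lowering `d0 + s0` to standard arithmetic, assuming a builder, a location, and two hypothetical index values `dim0`/`sym0` are in scope:

    AffineExpr expr = builder.getAffineDimExpr(0) + builder.getAffineSymbolExpr(0);
    ValuePtr lowered = expandAffineExpr(builder, loc, expr,
                                        /*dimValues=*/{dim0},
                                        /*symbolValues=*/{sym0});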
diff --git a/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h b/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h
index 0aab8723eab..58d49a13391 100644
--- a/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h
+++ b/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h
@@ -24,6 +24,9 @@ class AffineForOp;
struct LogicalResult;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
namespace loop {
class ForOp;
} // end namespace loop
@@ -78,8 +81,8 @@ LogicalResult convertLoopNestToGPULaunch(loop::ForOp forOp,
/// The above conditions are assumed to be satisfied by the computation rooted
/// at `forOp`.
LogicalResult convertLoopToGPULaunch(loop::ForOp forOp,
- ArrayRef<Value *> numWorkGroups,
- ArrayRef<Value *> workGroupSizes);
+ ArrayRef<ValuePtr> numWorkGroups,
+ ArrayRef<ValuePtr> workGroupSizes);
} // namespace mlir
diff --git a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
index e8d16f064a8..6f41fb68633 100644
--- a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
+++ b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
@@ -74,16 +74,16 @@ public:
/// Promote the LLVM struct representation of all MemRef descriptors to stack
/// and use pointers to struct to avoid the complexity of the
/// platform-specific C/C++ ABI lowering related to struct argument passing.
- SmallVector<Value *, 4> promoteMemRefDescriptors(Location loc,
- ValueRange opOperands,
- ValueRange operands,
- OpBuilder &builder);
+ SmallVector<ValuePtr, 4> promoteMemRefDescriptors(Location loc,
+ ValueRange opOperands,
+ ValueRange operands,
+ OpBuilder &builder);
/// Promote the LLVM struct representation of one MemRef descriptor to stack
/// and use pointer to struct to avoid the complexity of the platform-specific
/// C/C++ ABI lowering related to struct argument passing.
- Value *promoteOneMemRefDescriptor(Location loc, Value *operand,
- OpBuilder &builder);
+ ValuePtr promoteOneMemRefDescriptor(Location loc, ValuePtr operand,
+ OpBuilder &builder);
protected:
/// LLVM IR module used to parse/create types.
@@ -139,24 +139,24 @@ private:
class StructBuilder {
public:
/// Construct a helper for the given value.
- explicit StructBuilder(Value *v);
+ explicit StructBuilder(ValuePtr v);
/// Builds IR creating an `undef` value of the descriptor type.
static StructBuilder undef(OpBuilder &builder, Location loc,
Type descriptorType);
- /*implicit*/ operator Value *() { return value; }
+ /*implicit*/ operator ValuePtr() { return value; }
protected:
// LLVM value
- Value *value;
+ ValuePtr value;
// Cached struct type.
Type structType;
protected:
/// Builds IR to extract a value from the struct at position pos
- Value *extractPtr(OpBuilder &builder, Location loc, unsigned pos);
+ ValuePtr extractPtr(OpBuilder &builder, Location loc, unsigned pos);
/// Builds IR to set a value in the struct at position pos
- void setPtr(OpBuilder &builder, Location loc, unsigned pos, Value *ptr);
+ void setPtr(OpBuilder &builder, Location loc, unsigned pos, ValuePtr ptr);
};
/// Helper class to produce LLVM dialect operations extracting or inserting
/// elements of a MemRef descriptor. Wraps a Value pointing to the descriptor.
@@ -164,7 +164,7 @@ protected:
class MemRefDescriptor : public StructBuilder {
public:
/// Construct a helper for the given descriptor value.
- explicit MemRefDescriptor(Value *descriptor);
+ explicit MemRefDescriptor(ValuePtr descriptor);
/// Builds IR creating an `undef` value of the descriptor type.
static MemRefDescriptor undef(OpBuilder &builder, Location loc,
Type descriptorType);
@@ -173,39 +173,40 @@ public:
/// type.
static MemRefDescriptor fromStaticShape(OpBuilder &builder, Location loc,
LLVMTypeConverter &typeConverter,
- MemRefType type, Value *memory);
+ MemRefType type, ValuePtr memory);
/// Builds IR extracting the allocated pointer from the descriptor.
- Value *allocatedPtr(OpBuilder &builder, Location loc);
+ ValuePtr allocatedPtr(OpBuilder &builder, Location loc);
/// Builds IR inserting the allocated pointer into the descriptor.
- void setAllocatedPtr(OpBuilder &builder, Location loc, Value *ptr);
+ void setAllocatedPtr(OpBuilder &builder, Location loc, ValuePtr ptr);
/// Builds IR extracting the aligned pointer from the descriptor.
- Value *alignedPtr(OpBuilder &builder, Location loc);
+ ValuePtr alignedPtr(OpBuilder &builder, Location loc);
/// Builds IR inserting the aligned pointer into the descriptor.
- void setAlignedPtr(OpBuilder &builder, Location loc, Value *ptr);
+ void setAlignedPtr(OpBuilder &builder, Location loc, ValuePtr ptr);
/// Builds IR extracting the offset from the descriptor.
- Value *offset(OpBuilder &builder, Location loc);
+ ValuePtr offset(OpBuilder &builder, Location loc);
/// Builds IR inserting the offset into the descriptor.
- void setOffset(OpBuilder &builder, Location loc, Value *offset);
+ void setOffset(OpBuilder &builder, Location loc, ValuePtr offset);
void setConstantOffset(OpBuilder &builder, Location loc, uint64_t offset);
/// Builds IR extracting the pos-th size from the descriptor.
- Value *size(OpBuilder &builder, Location loc, unsigned pos);
+ ValuePtr size(OpBuilder &builder, Location loc, unsigned pos);
/// Builds IR inserting the pos-th size into the descriptor
- void setSize(OpBuilder &builder, Location loc, unsigned pos, Value *size);
+ void setSize(OpBuilder &builder, Location loc, unsigned pos, ValuePtr size);
void setConstantSize(OpBuilder &builder, Location loc, unsigned pos,
uint64_t size);
/// Builds IR extracting the pos-th stride from the descriptor.
- Value *stride(OpBuilder &builder, Location loc, unsigned pos);
+ ValuePtr stride(OpBuilder &builder, Location loc, unsigned pos);
/// Builds IR inserting the pos-th stride into the descriptor
- void setStride(OpBuilder &builder, Location loc, unsigned pos, Value *stride);
+ void setStride(OpBuilder &builder, Location loc, unsigned pos,
+ ValuePtr stride);
void setConstantStride(OpBuilder &builder, Location loc, unsigned pos,
uint64_t stride);
@@ -220,19 +221,19 @@ private:
class UnrankedMemRefDescriptor : public StructBuilder {
public:
/// Construct a helper for the given descriptor value.
- explicit UnrankedMemRefDescriptor(Value *descriptor);
+ explicit UnrankedMemRefDescriptor(ValuePtr descriptor);
/// Builds IR creating an `undef` value of the descriptor type.
static UnrankedMemRefDescriptor undef(OpBuilder &builder, Location loc,
Type descriptorType);
/// Builds IR extracting the rank from the descriptor
- Value *rank(OpBuilder &builder, Location loc);
+ ValuePtr rank(OpBuilder &builder, Location loc);
/// Builds IR setting the rank in the descriptor
- void setRank(OpBuilder &builder, Location loc, Value *value);
+ void setRank(OpBuilder &builder, Location loc, ValuePtr value);
/// Builds IR extracting ranked memref descriptor ptr
- Value *memRefDescPtr(OpBuilder &builder, Location loc);
+ ValuePtr memRefDescPtr(OpBuilder &builder, Location loc);
/// Builds IR setting ranked memref descriptor ptr
- void setMemRefDescPtr(OpBuilder &builder, Location loc, Value *value);
+ void setMemRefDescPtr(OpBuilder &builder, Location loc, ValuePtr value);
};
/// Base class for operation conversions targeting the LLVM IR dialect. Provides
/// conversion patterns with an access to the containing LLVMLowering for the
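A typical lowering pattern populates a descriptor through the helpers above. Sketch only; `rewriter`, `loc`, `llvmDescTy`, `allocated`, and `sizeVal` are assumed to already exist:

    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmDescTy);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    desc.setAlignedPtr(rewriter, loc, allocated);
    desc.setConstantOffset(rewriter, loc, 0);
    desc.setSize(rewriter, loc, /*pos=*/0, sizeVal);
    desc.setConstantStride(rewriter, loc, /*pos=*/0, 1);
    ValuePtr packed = desc;   // via the implicit operator ValuePtr()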
diff --git a/mlir/include/mlir/Dialect/AffineOps/AffineOps.h b/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
index 36b4e55e77c..764f439e020 100644
--- a/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
+++ b/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
@@ -41,7 +41,7 @@ class OpBuilder;
/// A utility function to check if a value is defined at the top level of a
/// function. A value of index type defined at the top level is always a valid
/// symbol.
-bool isTopLevelValue(Value *value);
+bool isTopLevelValue(ValuePtr value);
class AffineOpsDialect : public Dialect {
public:
@@ -148,18 +148,19 @@ class AffineDmaStartOp : public Op<AffineDmaStartOp, OpTrait::VariadicOperands,
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *srcMemRef,
- AffineMap srcMap, ValueRange srcIndices, Value *destMemRef,
- AffineMap dstMap, ValueRange destIndices, Value *tagMemRef,
- AffineMap tagMap, ValueRange tagIndices, Value *numElements,
- Value *stride = nullptr,
- Value *elementsPerStride = nullptr);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr srcMemRef, AffineMap srcMap, ValueRange srcIndices,
+ ValuePtr destMemRef, AffineMap dstMap,
+ ValueRange destIndices, ValuePtr tagMemRef,
+ AffineMap tagMap, ValueRange tagIndices,
+ ValuePtr numElements, ValuePtr stride = nullptr,
+ ValuePtr elementsPerStride = nullptr);
/// Returns the operand index of the src memref.
unsigned getSrcMemRefOperandIndex() { return 0; }
/// Returns the source MemRefType for this DMA operation.
- Value *getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
+ ValuePtr getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
MemRefType getSrcMemRefType() {
return getSrcMemRef()->getType().cast<MemRefType>();
}
@@ -191,7 +192,7 @@ public:
}
/// Returns the destination MemRefType for this DMA operation.
- Value *getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
+ ValuePtr getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
MemRefType getDstMemRefType() {
return getDstMemRef()->getType().cast<MemRefType>();
}
@@ -225,7 +226,7 @@ public:
}
/// Returns the Tag MemRef for this DMA operation.
- Value *getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
+ ValuePtr getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
MemRefType getTagMemRefType() {
return getTagMemRef()->getType().cast<MemRefType>();
}
@@ -249,13 +250,13 @@ public:
}
/// Returns the number of elements being transferred by this DMA operation.
- Value *getNumElements() {
+ ValuePtr getNumElements() {
return getOperand(getTagMemRefOperandIndex() + 1 +
getTagMap().getNumInputs());
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
if (memref == getSrcMemRef())
return {Identifier::get(getSrcMapAttrName(), getContext()),
getSrcMapAttr()};
@@ -305,14 +306,14 @@ public:
}
/// Returns the stride value for this DMA operation.
- Value *getStride() {
+ ValuePtr getStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1 - 1);
}
/// Returns the number of elements to transfer per stride for this DMA op.
- Value *getNumElementsPerStride() {
+ ValuePtr getNumElementsPerStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1);
@@ -337,14 +338,14 @@ class AffineDmaWaitOp : public Op<AffineDmaWaitOp, OpTrait::VariadicOperands,
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *tagMemRef,
- AffineMap tagMap, ValueRange tagIndices,
- Value *numElements);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr tagMemRef, AffineMap tagMap, ValueRange tagIndices,
+ ValuePtr numElements);
static StringRef getOperationName() { return "affine.dma_wait"; }
// Returns the Tag MemRef associated with the DMA operation being waited on.
- Value *getTagMemRef() { return getOperand(0); }
+ ValuePtr getTagMemRef() { return getOperand(0); }
MemRefType getTagMemRefType() {
return getTagMemRef()->getType().cast<MemRefType>();
}
@@ -367,14 +368,16 @@ public:
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
assert(memref == getTagMemRef());
return {Identifier::get(getTagMapAttrName(), getContext()),
getTagMapAttr()};
}
/// Returns the number of elements transferred in the associated DMA op.
- Value *getNumElements() { return getOperand(1 + getTagMap().getNumInputs()); }
+ ValuePtr getNumElements() {
+ return getOperand(1 + getTagMap().getNumInputs());
+ }
static StringRef getTagMapAttrName() { return "tag_map"; }
static ParseResult parse(OpAsmParser &parser, OperationState &result);
@@ -409,18 +412,18 @@ public:
static void build(Builder *builder, OperationState &result, AffineMap map,
ValueRange operands);
/// Builds an affine load op with an identity map and operands.
- static void build(Builder *builder, OperationState &result, Value *memref,
+ static void build(Builder *builder, OperationState &result, ValuePtr memref,
ValueRange indices = {});
/// Builds an affine load op with the specified map and its operands.
- static void build(Builder *builder, OperationState &result, Value *memref,
+ static void build(Builder *builder, OperationState &result, ValuePtr memref,
AffineMap map, ValueRange mapOperands);
/// Returns the operand index of the memref.
unsigned getMemRefOperandIndex() { return 0; }
/// Get memref operand.
- Value *getMemRef() { return getOperand(getMemRefOperandIndex()); }
- void setMemRef(Value *value) { setOperand(getMemRefOperandIndex(), value); }
+ ValuePtr getMemRef() { return getOperand(getMemRefOperandIndex()); }
+ void setMemRef(ValuePtr value) { setOperand(getMemRefOperandIndex(), value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
@@ -435,7 +438,7 @@ public:
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
assert(memref == getMemRef());
return {Identifier::get(getMapAttrName(), getContext()),
getAffineMapAttr()};
@@ -476,21 +479,21 @@ public:
/// Builds an affine store operation with the provided indices (identity map).
static void build(Builder *builder, OperationState &result,
- Value *valueToStore, Value *memref, ValueRange indices);
+ ValuePtr valueToStore, ValuePtr memref, ValueRange indices);
/// Builds an affine store operation with the specified map and its operands.
static void build(Builder *builder, OperationState &result,
- Value *valueToStore, Value *memref, AffineMap map,
+ ValuePtr valueToStore, ValuePtr memref, AffineMap map,
ValueRange mapOperands);
/// Get value to be stored by store operation.
- Value *getValueToStore() { return getOperand(0); }
+ ValuePtr getValueToStore() { return getOperand(0); }
/// Returns the operand index of the memref.
unsigned getMemRefOperandIndex() { return 1; }
/// Get memref operand.
- Value *getMemRef() { return getOperand(getMemRefOperandIndex()); }
- void setMemRef(Value *value) { setOperand(getMemRefOperandIndex(), value); }
+ ValuePtr getMemRef() { return getOperand(getMemRefOperandIndex()); }
+ void setMemRef(ValuePtr value) { setOperand(getMemRefOperandIndex(), value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
@@ -506,7 +509,7 @@ public:
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
assert(memref == getMemRef());
return {Identifier::get(getMapAttrName(), getContext()),
getAffineMapAttr()};
@@ -526,10 +529,10 @@ public:
};
/// Returns true if the given Value can be used as a dimension id.
-bool isValidDim(Value *value);
+bool isValidDim(ValuePtr value);
/// Returns true if the given Value can be used as a symbol.
-bool isValidSymbol(Value *value);
+bool isValidSymbol(ValuePtr value);
/// Modifies both `map` and `operands` in-place so as to:
/// 1. drop duplicate operands
@@ -538,17 +541,17 @@ bool isValidSymbol(Value *value);
/// dimensional operands
/// 4. propagate constant operands and drop them
void canonicalizeMapAndOperands(AffineMap *map,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
/// Canonicalizes an integer set the same way canonicalizeMapAndOperands does
/// for affine maps.
void canonicalizeSetAndOperands(IntegerSet *set,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
/// Returns a composed AffineApplyOp by composing `map` and `operands` with
/// other AffineApplyOps supplying those operands. The operands of the resulting
/// AffineApplyOp do not change the length of AffineApplyOp chains.
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> operands);
+ ArrayRef<ValuePtr> operands);
/// Given an affine map `map` and its input `operands`, this method composes
/// into `map`, maps of AffineApplyOps whose results are the values in
@@ -558,22 +561,22 @@ AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
/// terminal symbol, i.e., a symbol defined at the top level or a block/function
/// argument.
void fullyComposeAffineMapAndOperands(AffineMap *map,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
#define GET_OP_CLASSES
#include "mlir/Dialect/AffineOps/AffineOps.h.inc"
/// Returns true if the provided value is the induction variable of an AffineForOp.
-bool isForInductionVar(Value *val);
+bool isForInductionVar(ValuePtr val);
/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, then return nullptr.
-AffineForOp getForInductionVarOwner(Value *val);
+AffineForOp getForInductionVarOwner(ValuePtr val);
/// Extracts the induction variables from a list of AffineForOps and places them
/// in the output argument `ivs`.
void extractForInductionVars(ArrayRef<AffineForOp> forInsts,
- SmallVectorImpl<Value *> *ivs);
+ SmallVectorImpl<ValuePtr> *ivs);
/// AffineBound represents a lower or upper bound in the for operation.
/// This class does not own the underlying operands. Instead, it refers
@@ -588,7 +591,7 @@ public:
AffineValueMap getAsAffineValueMap();
unsigned getNumOperands() { return opEnd - opStart; }
- Value *getOperand(unsigned idx) { return op.getOperand(opStart + idx); }
+ ValuePtr getOperand(unsigned idx) { return op.getOperand(opStart + idx); }
using operand_iterator = AffineForOp::operand_iterator;
using operand_range = AffineForOp::operand_range;
@@ -613,7 +616,7 @@ private:
};
/// An `AffineApplyNormalizer` is a helper class that supports renumbering
-/// operands of AffineApplyOp. This acts as a reindexing map of Value* to
+/// operands of AffineApplyOp. This acts as a reindexing map of Value to
/// positional dims or symbols and allows simplifications such as:
///
/// ```mlir
@@ -626,13 +629,13 @@ private:
/// %1 = affine.apply () -> (0)
/// ```
struct AffineApplyNormalizer {
- AffineApplyNormalizer(AffineMap map, ArrayRef<Value *> operands);
+ AffineApplyNormalizer(AffineMap map, ArrayRef<ValuePtr> operands);
/// Returns the AffineMap resulting from normalization.
AffineMap getAffineMap() { return affineMap; }
- SmallVector<Value *, 8> getOperands() {
- SmallVector<Value *, 8> res(reorderedDims);
+ SmallVector<ValuePtr, 8> getOperands() {
+ SmallVector<ValuePtr, 8> res(reorderedDims);
res.append(concatenatedSymbols.begin(), concatenatedSymbols.end());
return res;
}
@@ -642,13 +645,13 @@ struct AffineApplyNormalizer {
/// Normalizes 'otherMap' and its operands 'otherOperands' to map to this
/// normalizer's coordinate space.
- void normalize(AffineMap *otherMap, SmallVectorImpl<Value *> *otherOperands);
+ void normalize(AffineMap *otherMap, SmallVectorImpl<ValuePtr> *otherOperands);
private:
/// Helper function to insert `v` into the coordinate system of the current
/// AffineApplyNormalizer. Returns the AffineDimExpr with the corresponding
/// renumbered position.
- AffineDimExpr renumberOneDim(Value *v);
+ AffineDimExpr renumberOneDim(ValuePtr v);
/// Given an `other` normalizer, this rewrites `other.affineMap` in the
/// coordinate system of the current AffineApplyNormalizer.
@@ -656,13 +659,13 @@ private:
/// `this`.
AffineMap renumber(const AffineApplyNormalizer &other);
- /// Maps of Value* to position in `affineMap`.
- DenseMap<Value *, unsigned> dimValueToPosition;
+ /// Maps of Value to position in `affineMap`.
+ DenseMap<ValuePtr, unsigned> dimValueToPosition;
/// Ordered dims and symbols matching positional dims and symbols in
/// `affineMap`.
- SmallVector<Value *, 8> reorderedDims;
- SmallVector<Value *, 8> concatenatedSymbols;
+ SmallVector<ValuePtr, 8> reorderedDims;
+ SmallVector<ValuePtr, 8> concatenatedSymbols;
AffineMap affineMap;
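The map-composition helpers near the end of this header chain together naturally; a sketch with hypothetical `builder`, `loc`, `iv`, and `sym` values:

    SmallVector<ValuePtr, 4> mapOperands = {iv, sym};
    AffineMap map = AffineMap::get(
        /*dimCount=*/1, /*symbolCount=*/1,
        {builder.getAffineDimExpr(0) + builder.getAffineSymbolExpr(0)});
    fullyComposeAffineMapAndOperands(&map, &mapOperands);
    canonicalizeMapAndOperands(&map, &mapOperands);
    AffineApplyOp apply = makeComposedAffineApply(builder, loc, map, mapOperands);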
diff --git a/mlir/include/mlir/Dialect/AffineOps/AffineOps.td b/mlir/include/mlir/Dialect/AffineOps/AffineOps.td
index b40990ecb5d..befdc2f6237 100644
--- a/mlir/include/mlir/Dialect/AffineOps/AffineOps.td
+++ b/mlir/include/mlir/Dialect/AffineOps/AffineOps.td
@@ -101,7 +101,7 @@ def AffineForOp : Affine_Op<"for",
static StringRef getUpperBoundAttrName() { return "upper_bound"; }
Block *getBody() { return &region().front(); }
- Value *getInductionVar() { return getBody()->getArgument(0); }
+ ValuePtr getInductionVar() { return getBody()->getArgument(0); }
OpBuilder getBodyBuilder() {
return OpBuilder(getBody(), std::prev(getBody()->end()));
}
@@ -286,8 +286,8 @@ def AffinePrefetchOp : Affine_Op<"prefetch"> {
BoolAttr:$isDataCache);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memref,"
- "AffineMap map, ArrayRef<Value *> mapOperands, bool isWrite,"
+ "Builder *builder, OperationState &result, ValuePtr memref,"
+ "AffineMap map, ArrayRef<ValuePtr> mapOperands, bool isWrite,"
"unsigned localityHint, bool isDataCache",
[{
assert(map.getNumInputs() == mapOperands.size()
@@ -315,7 +315,7 @@ def AffinePrefetchOp : Affine_Op<"prefetch"> {
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *mref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr mref) {
assert(mref == memref());
return {Identifier::get(getMapAttrName(), getContext()),
getAffineMapAttr()};
diff --git a/mlir/include/mlir/Dialect/GPU/GPUDialect.h b/mlir/include/mlir/Dialect/GPU/GPUDialect.h
index 93c0b13ee3e..12c2aa1bbd1 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUDialect.h
+++ b/mlir/include/mlir/Dialect/GPU/GPUDialect.h
@@ -77,9 +77,9 @@ public:
/// Utility class for the GPU dialect to represent triples of `Value`s
/// accessible through `.x`, `.y`, and `.z` similarly to CUDA notation.
struct KernelDim3 {
- Value *x;
- Value *y;
- Value *z;
+ ValuePtr x;
+ ValuePtr y;
+ ValuePtr z;
};
#define GET_OP_CLASSES
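`KernelDim3` is just a bundle of three `ValuePtr`s; for instance, unpacking the grid dimensions of a hypothetical `launchFuncOp`:

    KernelDim3 grid = launchFuncOp.getGridSizeOperandValues();
    SmallVector<ValuePtr, 3> gridDims = {grid.x, grid.y, grid.z};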
diff --git a/mlir/include/mlir/Dialect/GPU/GPUOps.td b/mlir/include/mlir/Dialect/GPU/GPUOps.td
index 6751f0a3f70..def1ff2b8a1 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/GPUOps.td
@@ -157,7 +157,7 @@ def GPU_GPUFuncOp : GPU_Op<"func", [FunctionLike, IsolatedFromAbove, Symbol]> {
/// Returns a list of block arguments that correspond to buffers located in
/// the workgroup memory
- ArrayRef<BlockArgument *> getWorkgroupAttributions() {
+ ArrayRef<BlockArgumentPtr> getWorkgroupAttributions() {
auto begin =
std::next(getBody().front().args_begin(), getType().getNumInputs());
auto end = std::next(begin, getNumWorkgroupAttributions());
@@ -166,7 +166,7 @@ def GPU_GPUFuncOp : GPU_Op<"func", [FunctionLike, IsolatedFromAbove, Symbol]> {
/// Returns a list of block arguments that correspond to buffers located in
/// the private memory.
- ArrayRef<BlockArgument *> getPrivateAttributions() {
+ ArrayRef<BlockArgumentPtr> getPrivateAttributions() {
auto begin =
std::next(getBody().front().args_begin(),
getType().getNumInputs() + getNumWorkgroupAttributions());
@@ -282,8 +282,8 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func">,
let builders = [
OpBuilder<"Builder *builder, OperationState &result, GPUFuncOp kernelFunc, "
- "Value *gridSizeX, Value *gridSizeY, Value *gridSizeZ, "
- "Value *blockSizeX, Value *blockSizeY, Value *blockSizeZ, "
+ "ValuePtr gridSizeX, ValuePtr gridSizeY, ValuePtr gridSizeZ, "
+ "ValuePtr blockSizeX, ValuePtr blockSizeY, ValuePtr blockSizeZ, "
"ValueRange kernelOperands">,
OpBuilder<"Builder *builder, OperationState &result, GPUFuncOp kernelFunc, "
"KernelDim3 gridSize, KernelDim3 blockSize, "
@@ -302,7 +302,7 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func">,
StringRef getKernelModuleName();
/// The i-th operand passed to the kernel function.
- Value *getKernelOperand(unsigned i);
+ ValuePtr getKernelOperand(unsigned i);
/// Get the SSA values passed as operands to specify the grid size.
KernelDim3 getGridSizeOperandValues();
@@ -415,9 +415,9 @@ def GPU_LaunchOp : GPU_Op<"launch", [IsolatedFromAbove]>,
let skipDefaultBuilders = 1;
let builders = [
- OpBuilder<"Builder *builder, OperationState &result, Value *gridSizeX,"
- "Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX,"
- "Value *blockSizeY, Value *blockSizeZ,"
+ OpBuilder<"Builder *builder, OperationState &result, ValuePtr gridSizeX,"
+ "ValuePtr gridSizeY, ValuePtr gridSizeZ, ValuePtr blockSizeX,"
+ "ValuePtr blockSizeY, ValuePtr blockSizeZ,"
"ValueRange operands">
];
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
index dae27d00e5a..a599d51b31f 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
@@ -194,9 +194,9 @@ private:
/// surrounding the insertion point of builder. Obtain the address of that
/// global and use it to compute the address of the first character in the
/// string (operations inserted at the builder insertion point).
-Value *createGlobalString(Location loc, OpBuilder &builder, StringRef name,
- StringRef value, LLVM::Linkage linkage,
- LLVM::LLVMDialect *llvmDialect);
+ValuePtr createGlobalString(Location loc, OpBuilder &builder, StringRef name,
+ StringRef value, LLVM::Linkage linkage,
+ LLVM::LLVMDialect *llvmDialect);
/// LLVM requires some operations to be inside of a Module operation. This
/// function confirms that the Operation has the desired properties.
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 00acc539dab..cfbbf7da65d 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -185,8 +185,8 @@ def LLVM_ICmpOp : LLVM_OneResultOp<"icmp", [NoSideEffect]>,
$res = builder.CreateICmp(getLLVMCmpPredicate($predicate), $lhs, $rhs);
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, ICmpPredicate predicate, Value *lhs, "
- "Value *rhs", [{
+ "Builder *b, OperationState &result, ICmpPredicate predicate, ValuePtr lhs, "
+ "ValuePtr rhs", [{
LLVMDialect *dialect = &lhs->getType().cast<LLVMType>().getDialect();
build(b, result, LLVMType::getInt1Ty(dialect),
b->getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
@@ -232,8 +232,8 @@ def LLVM_FCmpOp : LLVM_OneResultOp<"fcmp", [NoSideEffect]>,
$res = builder.CreateFCmp(getLLVMCmpPredicate($predicate), $lhs, $rhs);
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, FCmpPredicate predicate, Value *lhs, "
- "Value *rhs", [{
+ "Builder *b, OperationState &result, FCmpPredicate predicate, ValuePtr lhs, "
+ "ValuePtr rhs", [{
LLVMDialect *dialect = &lhs->getType().cast<LLVMType>().getDialect();
build(b, result, LLVMType::getInt1Ty(dialect),
b->getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
@@ -265,7 +265,7 @@ def LLVM_AllocaOp :
$res = alloca;
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Type resultType, Value *arraySize, "
+ "Builder *b, OperationState &result, Type resultType, ValuePtr arraySize, "
"unsigned alignment",
[{
if (alignment == 0)
@@ -292,7 +292,7 @@ def LLVM_GEPOp : LLVM_OneResultOp<"getelementptr", [NoSideEffect]>,
def LLVM_LoadOp : LLVM_OneResultOp<"load">, Arguments<(ins LLVM_Type:$addr)>,
LLVM_Builder<"$res = builder.CreateLoad($addr);"> {
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *addr",
+ "Builder *b, OperationState &result, ValuePtr addr",
[{
auto type = addr->getType().cast<LLVM::LLVMType>().getPointerElementTy();
build(b, result, type, addr);
@@ -353,7 +353,7 @@ def LLVM_ExtractElementOp : LLVM_OneResultOp<"extractelement", [NoSideEffect]>,
$res = builder.CreateExtractElement($vector, $position);
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *vector, Value *position,"
+ "Builder *b, OperationState &result, ValuePtr vector, ValuePtr position,"
"ArrayRef<NamedAttribute> attrs = {}">];
let parser = [{ return parseExtractElementOp(parser, result); }];
let printer = [{ printExtractElementOp(p, *this); }];
@@ -384,7 +384,7 @@ def LLVM_InsertValueOp : LLVM_OneResultOp<"insertvalue", [NoSideEffect]>,
extractPosition($position));
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *container, Value *value, "
+ "Builder *b, OperationState &result, ValuePtr container, ValuePtr value, "
"ArrayAttr position",
[{
build(b, result, container->getType(), container, value, position);
@@ -398,7 +398,7 @@ def LLVM_ShuffleVectorOp
LLVM_Builder<
"$res = builder.CreateShuffleVector($v1, $v2, extractPosition($mask));"> {
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *v1, Value *v2, "
+ "Builder *b, OperationState &result, ValuePtr v1, ValuePtr v2, "
"ArrayAttr mask, ArrayRef<NamedAttribute> attrs = {}">];
let verifier = [{
auto wrappedVectorType1 = v1()->getType().cast<LLVM::LLVMType>();
@@ -422,8 +422,8 @@ def LLVM_SelectOp
LLVM_Builder<
"$res = builder.CreateSelect($condition, $trueValue, $falseValue);"> {
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *condition, Value *lhs, "
- "Value *rhs", [{
+ "Builder *b, OperationState &result, ValuePtr condition, ValuePtr lhs, "
+ "ValuePtr rhs", [{
build(b, result, lhs->getType(), condition, lhs, rhs);
}]>];
let parser = [{ return parseSelectOp(parser, result); }];
diff --git a/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h b/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h
index 01d3e4b239c..426708b14a8 100644
--- a/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h
+++ b/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h
@@ -37,15 +37,15 @@ class LinalgOp;
class Aliases {
public:
/// Returns true if v1 and v2 alias.
- bool alias(Value *v1, Value *v2) { return find(v1) == find(v2); }
+ bool alias(ValuePtr v1, ValuePtr v2) { return find(v1) == find(v2); }
private:
/// Returns the base buffer or block argument into which the view `v` aliases.
/// This lazily records the new aliases discovered while walking back the
/// use-def chain.
- Value *find(Value *v);
+ ValuePtr find(ValuePtr v);
- DenseMap<Value *, Value *> aliases;
+ DenseMap<ValuePtr, ValuePtr> aliases;
};
/// Data structure for holding a dependence graph that operates on LinalgOp and
@@ -54,7 +54,7 @@ class LinalgDependenceGraph {
public:
struct LinalgOpView {
Operation *op;
- Value *view;
+ ValuePtr view;
};
struct LinalgDependenceGraphElem {
// dependentOpView may be either:
@@ -64,7 +64,7 @@ public:
// View in the op that is used to index in the graph:
// 1. src in the case of dependencesFromDstGraphs.
// 2. dst in the case of dependencesIntoGraphs.
- Value *indexingView;
+ ValuePtr indexingView;
};
using LinalgDependences = SmallVector<LinalgDependenceGraphElem, 8>;
using DependenceGraph = DenseMap<Operation *, LinalgDependences>;
@@ -97,14 +97,14 @@ public:
/// Dependences are restricted to views aliasing `view`.
SmallVector<Operation *, 8> findCoveringReads(LinalgOp srcLinalgOp,
LinalgOp dstLinalgOp,
- Value *view) const;
+ ValuePtr view) const;
/// Returns the operations that are interleaved between `srcLinalgOp` and
/// `dstLinalgOp` and that are involved in a WAR or WAW with `srcLinalgOp`.
/// Dependences are restricted to views aliasing `view`.
SmallVector<Operation *, 8> findCoveringWrites(LinalgOp srcLinalgOp,
LinalgOp dstLinalgOp,
- Value *view) const;
+ ValuePtr view) const;
private:
// Keep dependences in both directions, this is not just a performance gain
@@ -130,7 +130,7 @@ private:
/// Implementation detail for findCoveringxxx.
SmallVector<Operation *, 8>
findOperationsWithCoveringDependences(LinalgOp srcLinalgOp,
- LinalgOp dstLinalgOp, Value *view,
+ LinalgOp dstLinalgOp, ValuePtr view,
ArrayRef<DependenceType> types) const;
Aliases &aliases;
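A sketch of an aliasing query over two hypothetical view values `viewA` and `viewB`:

    Aliases aliases;
    if (aliases.alias(viewA, viewB))
      llvm::errs() << "views may share a base buffer\n";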
diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
index cf6335278b7..8375e750a5c 100644
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
@@ -55,34 +55,34 @@ inline StringRef toString(IterType t) {
/// makeLinalgGenericOp({A({m, n}), B({k, n})}, {C({m, n})}, ... );
/// ```
struct StructuredIndexed {
- StructuredIndexed(Value *v) : value(v) {}
+ StructuredIndexed(ValuePtr v) : value(v) {}
StructuredIndexed operator()(ArrayRef<AffineExpr> indexings) {
return StructuredIndexed(value, indexings);
}
- operator Value *() const /* implicit */ { return value; }
+ operator ValuePtr() const /* implicit */ { return value; }
ArrayRef<AffineExpr> getExprs() { return exprs; }
private:
- StructuredIndexed(Value *v, ArrayRef<AffineExpr> indexings)
+ StructuredIndexed(ValuePtr v, ArrayRef<AffineExpr> indexings)
: value(v), exprs(indexings.begin(), indexings.end()) {
assert(v->getType().isa<MemRefType>() && "MemRefType expected");
}
StructuredIndexed(ValueHandle v, ArrayRef<AffineExpr> indexings)
: StructuredIndexed(v.getValue(), indexings) {}
- Value *value;
+ ValuePtr value;
SmallVector<AffineExpr, 4> exprs;
};
-inline void defaultRegionBuilder(ArrayRef<BlockArgument *> args) {}
+inline void defaultRegionBuilder(ArrayRef<BlockArgumentPtr> args) {}
Operation *makeLinalgGenericOp(ArrayRef<IterType> iteratorTypes,
ArrayRef<StructuredIndexed> inputs,
ArrayRef<StructuredIndexed> outputs,
- function_ref<void(ArrayRef<BlockArgument *>)>
+ function_ref<void(ArrayRef<BlockArgumentPtr>)>
regionBuilder = defaultRegionBuilder,
- ArrayRef<Value *> otherValues = {},
+ ArrayRef<ValuePtr> otherValues = {},
ArrayRef<Attribute> otherAttributes = {});
namespace ops {
@@ -96,7 +96,7 @@ using edsc::intrinsics::linalg_yield;
/// Build the body of a region to compute a multiply-accumulate, under the
/// current ScopedContext, at the current insert point.
-void macRegionBuilder(ArrayRef<BlockArgument *> args);
+void macRegionBuilder(ArrayRef<BlockArgumentPtr> args);
/// TODO(ntv): In the future we should tie these implementations to something in
/// Tablegen that generates the proper interfaces and the proper sugared named
@@ -120,7 +120,7 @@ void macRegionBuilder(ArrayRef<BlockArgument *> args);
/// with in-place semantics and parallelism.
/// Unary pointwise operation (with broadcast) entry point.
-using UnaryPointwiseOpBuilder = function_ref<Value *(ValueHandle)>;
+using UnaryPointwiseOpBuilder = function_ref<ValuePtr(ValueHandle)>;
Operation *linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
StructuredIndexed I, StructuredIndexed O);
@@ -131,7 +131,7 @@ Operation *linalg_pointwise_tanh(StructuredIndexed I, StructuredIndexed O);
/// Binary pointwise operation (with broadcast) entry point.
using BinaryPointwiseOpBuilder =
- function_ref<Value *(ValueHandle, ValueHandle)>;
+ function_ref<ValuePtr(ValueHandle, ValueHandle)>;
Operation *linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
StructuredIndexed I1, StructuredIndexed I2,
StructuredIndexed O);
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td
index 12318a244df..18ca31cc376 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td
@@ -92,22 +92,22 @@ def LinalgLibraryInterface : OpInterface<"LinalgOp"> {
"Query the number of loops within the current operation.",
"unsigned", "getNumLoops">,
InterfaceMethod<"Query the input view at the given index.",
- "Value *", "getInput", (ins "unsigned":$i)
+ "ValuePtr ", "getInput", (ins "unsigned":$i)
>,
InterfaceMethod<"Query the output view at the given index.",
- "Value *", "getOutput", (ins "unsigned":$i)
+ "ValuePtr ", "getOutput", (ins "unsigned":$i)
>,
InterfaceMethod<[{
Query the index of the given input value, or `None` if the value is not
an input.
}],
- "Optional<unsigned>", "getIndexOfInput", (ins "Value *":$view)
+ "Optional<unsigned>", "getIndexOfInput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the index of the given view value, or `None` if the value is not
a view.
}],
- "Optional<unsigned>", "getIndexOfOutput", (ins "Value *":$view)
+ "Optional<unsigned>", "getIndexOfOutput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the type of the input view at the given index.
@@ -228,7 +228,7 @@ def CopyOp : LinalgLibrary_Op<"copy", [NInputs<1>, NOutputs<1>]> {
// TODO(ntv) this should go away once the usage of OptionalAttr triggers
// emission of builders with default arguments left unspecified.
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *input, Value *output", [{
+ "Builder *builder, OperationState &result, ValuePtr input, ValuePtr output", [{
return build(
builder, result, input, output, AffineMapAttr(), AffineMapAttr());
}]>];
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
index b806d7548fb..5d402a9ded9 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -56,8 +56,8 @@ def Linalg_RangeOp :
````
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *min, Value *max, "
- "Value *step",
+ "Builder *builder, OperationState &result, ValuePtr min, ValuePtr max, "
+ "ValuePtr step",
[{
auto rangeType = RangeType::get(builder->getContext());
build(builder, result, rangeType, min, max, step);
@@ -112,7 +112,7 @@ def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>,
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *base, "
+ "Builder *b, OperationState &result, ValuePtr base, "
"ValueRange indexings">];
let extraClassDeclaration = [{
@@ -124,12 +124,12 @@ def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>,
MemRefType getBaseViewType() { return view()->getType().cast<MemRefType>(); }
// Get the underlying indexing at a given rank.
- Value *indexing(unsigned rank) { return *(indexings().begin() + rank); }
+ ValuePtr indexing(unsigned rank) { return *(indexings().begin() + rank); }
// Get the subset of indexings that are of RangeType.
- SmallVector<Value *, 8> getRanges() {
- SmallVector<Value *, 8> res;
- for (auto *operand : indexings())
+ SmallVector<ValuePtr, 8> getRanges() {
+ SmallVector<ValuePtr, 8> res;
+ for (auto operand : indexings())
if (!operand->getType().isa<IndexType>())
res.push_back(operand);
return res;
@@ -154,7 +154,7 @@ def Linalg_TransposeOp : Linalg_Op<"transpose", [NoSideEffect]>,
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *view, "
+ "Builder *b, OperationState &result, ValuePtr view, "
"AffineMapAttr permutation, ArrayRef<NamedAttribute> attrs = {}">];
let verifier = [{
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 75b63c93cd8..774be6616cd 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -92,22 +92,22 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
"Query the number of loops within the current operation.",
"unsigned", "getNumLoops">,
InterfaceMethod<"Query the input view at the given index.",
- "Value *", "getInput", (ins "unsigned":$i)
+ "ValuePtr ", "getInput", (ins "unsigned":$i)
>,
InterfaceMethod<"Query the output view at the given index.",
- "Value *", "getOutput", (ins "unsigned":$i)
+ "ValuePtr ", "getOutput", (ins "unsigned":$i)
>,
InterfaceMethod<[{
Query the index of the given input value, or `None` if the value is not
an input.
}],
- "llvm::Optional<unsigned>", "getIndexOfInput", (ins "Value *":$view)
+ "llvm::Optional<unsigned>", "getIndexOfInput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the index of the given view value, or `None` if the value is not
a view.
}],
- "llvm::Optional<unsigned>", "getIndexOfOutput", (ins "Value *":$view)
+ "llvm::Optional<unsigned>", "getIndexOfOutput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the type of the input view at the given index.
@@ -228,7 +228,7 @@ def CopyOp : LinalgStructured_Op<"copy", [NInputs<1>, NOutputs<1>]> {
// TODO(ntv) this should go away once the usage of OptionalAttr triggers
// emission of builders with default arguments left unspecified.
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *input, Value *output", [{
+ "Builder *builder, OperationState &result, ValuePtr input, ValuePtr output", [{
return build(
builder, result, input, output, AffineMapAttr(), AffineMapAttr());
}]>];
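(A hedged sketch of the updated CopyOp builder in use; it assumes an OpBuilder `b`, a Location `loc`, and two memref-typed values, and the wrapper function is illustrative only.)

#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// The generated CopyOp builder now takes ValuePtr (an alias for Value *)
// for both the input and the output view.
static void emitCopy(OpBuilder &b, Location loc, ValuePtr input,
                     ValuePtr output) {
  b.create<linalg::CopyOp>(loc, input, output);
}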
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
index a24c1ca63c4..d196e6ccf94 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
@@ -77,13 +77,13 @@ private:
public:
/// Return the `i`-th input view.
- Value *getInput(unsigned i) {
+ ValuePtr getInput(unsigned i) {
assert(i < nInputs());
return this->getOperation()->getOperand(i);
}
/// Return the index of `view` in the list of input views if found, llvm::None
/// otherwise.
- Optional<unsigned> getIndexOfInput(Value *view) {
+ Optional<unsigned> getIndexOfInput(ValuePtr view) {
auto it = llvm::find(getInputs(), view);
if (it != getInputs().end())
return it - getInputs().begin();
@@ -99,12 +99,12 @@ public:
return {range.begin(), range.begin() + nInputs()};
}
/// Return the `i`-th output view.
- Value *getOutput(unsigned i) {
+ ValuePtr getOutput(unsigned i) {
return this->getOperation()->getOperand(nInputs() + i);
}
/// Return the index of `view` in the list of output views if found,
/// llvm::None otherwise.
- Optional<unsigned> getIndexOfOutput(Value *view) {
+ Optional<unsigned> getIndexOfOutput(ValuePtr view) {
auto it = llvm::find(getOutputs(), view);
if (it != getOutputs().end())
return it - getOutputs().begin();
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td
index 415dd918f74..dbc162f4132 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td
@@ -45,7 +45,7 @@ class AffineMapDomainHasDim<int n> : CPred<[{
class HasOperandsOfType<string type>: CPred<[{
llvm::any_of($0.getOperands(),
- [](Value* v) {
+ [](ValuePtr v) {
return dyn_cast_or_null<}] # type # [{>(v->getDefiningOp());
})
}]>;
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h
index dfbac5ac193..a1a7458ae7f 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h
@@ -38,7 +38,7 @@ struct LinalgTransforms {
namespace detail {
// Implementation detail of isProducedByOpOfType avoids the need for explicit
// template instantiations.
-bool isProducedByOpOfTypeImpl(Operation *consumerOp, Value *consumedView,
+bool isProducedByOpOfTypeImpl(Operation *consumerOp, ValuePtr consumedView,
function_ref<bool(Operation *)> isaOpType);
} // namespace detail
@@ -46,7 +46,7 @@ bool isProducedByOpOfTypeImpl(Operation *consumerOp, Value *consumedView,
// an op of type `OpTy`. This is used to implement use-def type information on
// buffers.
template <typename OpTy>
-bool isProducedByOpOfType(Operation *consumerOp, Value *consumedView) {
+bool isProducedByOpOfType(Operation *consumerOp, ValuePtr consumedView) {
return detail::isProducedByOpOfTypeImpl(
consumerOp, consumedView, [](Operation *op) { return isa<OpTy>(op); });
}
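(A usage sketch of the templated query above; the enclosing namespace is assumed to be mlir::linalg, matching the rest of this header, and the function name is illustrative.)

#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Transforms/LinalgTransforms.h"

using namespace mlir;

// True if the given view consumed by `consumerOp` was produced by a
// linalg.copy; the view is now passed as ValuePtr.
static bool isFedByCopy(Operation *consumerOp, ValuePtr view) {
  return linalg::isProducedByOpOfType<linalg::CopyOp>(consumerOp, view);
}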
diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
index f8d10ecfa57..50039dd9336 100644
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -34,7 +34,7 @@ namespace edsc {
/// A LoopRangeBuilder is a generic NestedBuilder for loop.for operations.
/// More specifically it is meant to be used as a temporary object for
-/// representing any nested MLIR construct that is "related to" an mlir::Value*
+/// representing any nested MLIR construct that is "related to" an mlir::Value
/// (for now an induction variable).
class LoopRangeBuilder : public NestedBuilder {
public:
@@ -42,7 +42,7 @@ public:
/// variable. A ValueHandle pointer is passed as the first argument and is the
/// *only* way to capture the loop induction variable.
LoopRangeBuilder(ValueHandle *iv, ValueHandle range);
- LoopRangeBuilder(ValueHandle *iv, Value *range);
+ LoopRangeBuilder(ValueHandle *iv, ValuePtr range);
LoopRangeBuilder(ValueHandle *iv, SubViewOp::Range range);
LoopRangeBuilder(const LoopRangeBuilder &) = delete;
@@ -65,7 +65,7 @@ public:
LoopNestRangeBuilder(ArrayRef<edsc::ValueHandle *> ivs,
ArrayRef<edsc::ValueHandle> ranges);
LoopNestRangeBuilder(ArrayRef<edsc::ValueHandle *> ivs,
- ArrayRef<Value *> ranges);
+ ArrayRef<ValuePtr> ranges);
LoopNestRangeBuilder(ArrayRef<edsc::ValueHandle *> ivs,
ArrayRef<SubViewOp::Range> ranges);
edsc::ValueHandle operator()(std::function<void(void)> fun = nullptr);
@@ -88,14 +88,14 @@ struct FusionInfo {
/// whole `consumedView`. This checks structural dominance, that the dependence
/// is a RAW without any interleaved write to any piece of `consumedView`.
bool isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
- LinalgOp consumer, Value *consumedView,
+ LinalgOp consumer, ValuePtr consumedView,
LinalgOp producer);
/// Checks whether fusing the specific `producer` of the `consumedView` is
/// feasible. This checks `producer` is the last write of `consumedView` and
/// that no interleaved dependence would be violated (RAW, WAR or WAW).
bool isFusableInto(const LinalgDependenceGraph &graph, LinalgOp consumer,
- Value *consumedView, LinalgOp producer);
+ ValuePtr consumedView, LinalgOp producer);
/// Fuses producer into consumer if the producer is structurally feasible and
/// the fusion would not violate dependencies.
@@ -111,8 +111,8 @@ Optional<FusionInfo> fuseProducerOf(OpBuilder &b, LinalgOp consumer,
/// the inverse, concatenated loopToOperandRangeMaps to this list allows the
/// derivation of loop ranges for any linalgOp.
template <typename ConcreteOp>
-SmallVector<Value *, 8> getViewSizes(ConcreteOp linalgOp) {
- SmallVector<Value *, 8> res;
+SmallVector<ValuePtr, 8> getViewSizes(ConcreteOp linalgOp) {
+ SmallVector<ValuePtr, 8> res;
for (auto v : linalgOp.getInputsAndOutputs()) {
MemRefType t = v->getType().template cast<MemRefType>();
for (unsigned i = 0; i < t.getRank(); ++i)
@@ -125,10 +125,10 @@ SmallVector<Value *, 8> getViewSizes(ConcreteOp linalgOp) {
/// When non-null, the optional pointer `folder` is used to call into the
/// `createAndFold` builder method. If `folder` is null, the regular `create`
/// method is called.
-SmallVector<Value *, 4> applyMapToValues(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> values,
- OperationFolder *folder = nullptr);
+SmallVector<ValuePtr, 4> applyMapToValues(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> values,
+ OperationFolder *folder = nullptr);
struct TiledLinalgOp {
LinalgOp op;
@@ -151,7 +151,7 @@ struct TiledLinalgOp {
/// `createAndFold` builder method. If `folder` is null, the regular `create`
/// method is called.
Optional<TiledLinalgOp> tileLinalgOp(OpBuilder &b, LinalgOp op,
- ArrayRef<Value *> tileSizes,
+ ArrayRef<ValuePtr> tileSizes,
ArrayRef<unsigned> permutation = {},
OperationFolder *folder = nullptr);
@@ -182,9 +182,9 @@ Optional<TiledLinalgOp> tileLinalgOperation(OpBuilder &b, Operation *op,
}
struct PromotionInfo {
- Value *buffer;
- Value *fullLocalView;
- Value *partialLocalView;
+ ValuePtr buffer;
+ ValuePtr fullLocalView;
+ ValuePtr partialLocalView;
};
/// Promotes the `subViews` into a new buffer allocated at the insertion point
@@ -199,13 +199,13 @@ struct PromotionInfo {
/// Returns a list of PromotionInfo which hold the promoted buffer and the
/// full and partial views indexing into the buffer.
SmallVector<PromotionInfo, 8>
-promoteSubViews(OpBuilder &b, Location loc, ArrayRef<Value *> subViews,
+promoteSubViews(OpBuilder &b, Location loc, ArrayRef<ValuePtr> subViews,
bool dynamicBuffers = false, OperationFolder *folder = nullptr);
/// Returns all the operands of `linalgOp` that are not views.
/// Asserts that these operands are value types to allow transformations like
/// tiling to just use the values when cloning `linalgOp`.
-SmallVector<Value *, 4> getAssumedNonViewOperands(LinalgOp linalgOp);
+SmallVector<ValuePtr, 4> getAssumedNonViewOperands(LinalgOp linalgOp);
/// Apply the permutation defined by `permutation` to `inVec`.
/// Element `i` in `inVec` is mapped to location `j = permutation[i]`.
@@ -226,7 +226,7 @@ void applyPermutationToVector(SmallVector<T, N> &inVec,
/// It is the entry point for declarative transformation
/// Returns the cloned `LinalgOp` with the new operands
LinalgOp promoteSubViewOperands(OpBuilder &b, LinalgOp op,
- llvm::SetVector<Value *> subViews,
+ llvm::SetVector<ValuePtr> subViews,
bool dynamicBuffers = false,
OperationFolder *folder = nullptr);
diff --git a/mlir/include/mlir/Dialect/LoopOps/LoopOps.h b/mlir/include/mlir/Dialect/LoopOps/LoopOps.h
index fdadf4a40dd..e7ff6f84977 100644
--- a/mlir/include/mlir/Dialect/LoopOps/LoopOps.h
+++ b/mlir/include/mlir/Dialect/LoopOps/LoopOps.h
@@ -50,7 +50,7 @@ void ensureLoopTerminator(Region &region, Builder &builder, Location loc);
/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, then return nullptr.
-ForOp getForInductionVarOwner(Value *val);
+ForOp getForInductionVarOwner(ValuePtr val);
} // end namespace loop
} // end namespace mlir
diff --git a/mlir/include/mlir/Dialect/LoopOps/LoopOps.td b/mlir/include/mlir/Dialect/LoopOps/LoopOps.td
index 5e0b8098411..e0f5b896309 100644
--- a/mlir/include/mlir/Dialect/LoopOps/LoopOps.td
+++ b/mlir/include/mlir/Dialect/LoopOps/LoopOps.td
@@ -74,18 +74,18 @@ def ForOp : Loop_Op<"for",
let skipDefaultBuilders = 1;
let builders = [
OpBuilder<"Builder *builder, OperationState &result, "
- "Value *lowerBound, Value *upperBound, Value *step">
+ "ValuePtr lowerBound, ValuePtr upperBound, ValuePtr step">
];
let extraClassDeclaration = [{
Block *getBody() { return &region().front(); }
- Value *getInductionVar() { return getBody()->getArgument(0); }
+ ValuePtr getInductionVar() { return getBody()->getArgument(0); }
OpBuilder getBodyBuilder() {
return OpBuilder(getBody(), std::prev(getBody()->end()));
}
- void setLowerBound(Value *bound) { getOperation()->setOperand(0, bound); }
- void setUpperBound(Value *bound) { getOperation()->setOperand(1, bound); }
- void setStep(Value *step) { getOperation()->setOperand(2, step); }
+ void setLowerBound(ValuePtr bound) { getOperation()->setOperand(0, bound); }
+ void setUpperBound(ValuePtr bound) { getOperation()->setOperand(1, bound); }
+ void setStep(ValuePtr step) { getOperation()->setOperand(2, step); }
}];
}
@@ -116,7 +116,7 @@ def IfOp : Loop_Op<"if",
let skipDefaultBuilders = 1;
let builders = [
OpBuilder<"Builder *builder, OperationState &result, "
- "Value *cond, bool withElseRegion">
+ "ValuePtr cond, bool withElseRegion">
];
let extraClassDeclaration = [{
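(A minimal sketch of the updated loop.for builder and accessors; it assumes an OpBuilder `b`, a Location `loc`, and two index-typed bounds, and the helper is illustrative rather than part of the patch.)

#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Builds `loop.for %iv = %lb to %ub step %c1`; all bounds are ValuePtr now.
static loop::ForOp buildUnitStrideLoop(OpBuilder &b, Location loc, ValuePtr lb,
                                       ValuePtr ub) {
  ValuePtr step = b.create<ConstantIndexOp>(loc, 1);
  auto forOp = b.create<loop::ForOp>(loc, lb, ub, step);
  ValuePtr iv = forOp.getInductionVar(); // block argument of the loop body
  (void)iv;
  return forOp;
}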
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td
index d6e2e1c6fda..d19fd974684 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td
@@ -120,7 +120,7 @@ def SPV_CompositeExtractOp : SPV_Op<"CompositeExtract", [NoSideEffect]> {
let builders = [
OpBuilder<[{Builder *builder, OperationState &state,
- Value *composite, ArrayRef<int32_t> indices}]>
+ ValuePtr composite, ArrayRef<int32_t> indices}]>
];
let hasFolder = 1;
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td
index 464b670dae9..32a78024560 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td
@@ -132,7 +132,7 @@ def SPV_BranchConditionalOp : SPV_Op<"BranchConditional",
let builders = [
OpBuilder<
- "Builder *builder, OperationState &state, Value *condition, "
+ "Builder *builder, OperationState &state, ValuePtr condition, "
"Block *trueBlock, ValueRange trueArguments, "
"Block *falseBlock, ValueRange falseArguments, "
"Optional<std::pair<uint32_t, uint32_t>> weights = {}",
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td
index 0c4b2902a12..e1e94bcd861 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td
@@ -858,8 +858,8 @@ def SPV_SelectOp : SPV_Op<"Select", [NoSideEffect]> {
);
let builders = [OpBuilder<[{Builder *builder, OperationState &state,
- Value *cond, Value *trueValue,
- Value *falseValue}]>];
+ ValuePtr cond, ValuePtr trueValue,
+ ValuePtr falseValue}]>];
}
// -----
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h b/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h
index f48a1d0b129..37b4ee24237 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h
@@ -64,8 +64,8 @@ protected:
namespace spirv {
/// Returns a value that represents a builtin variable value within the SPIR-V
/// module.
-Value *getBuiltinVariableValue(Operation *op, spirv::BuiltIn builtin,
- OpBuilder &builder);
+ValuePtr getBuiltinVariableValue(Operation *op, spirv::BuiltIn builtin,
+ OpBuilder &builder);
/// Attribute name for specifying argument ABI information.
StringRef getInterfaceVarABIAttrName();
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td
index 91ea8d7d676..777e5750486 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td
@@ -102,7 +102,7 @@ def SPV_AccessChainOp : SPV_Op<"AccessChain", [NoSideEffect]> {
);
let builders = [OpBuilder<[{Builder *builder, OperationState &state,
- Value *basePtr, ValueRange indices}]>];
+ ValuePtr basePtr, ValueRange indices}]>];
let hasCanonicalizer = 1;
}
@@ -272,7 +272,7 @@ def SPV_LoadOp : SPV_Op<"Load", []> {
);
let builders = [OpBuilder<[{Builder *builder, OperationState &state,
- Value *basePtr, /*optional*/IntegerAttr memory_access,
+ ValuePtr basePtr, /*optional*/IntegerAttr memory_access,
/*optional*/IntegerAttr alignment}]>];
}
@@ -367,7 +367,7 @@ def SPV_StoreOp : SPV_Op<"Store", []> {
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "Value *ptr, Value *value, ArrayRef<NamedAttribute> namedAttrs", [{
+ "ValuePtr ptr, ValuePtr value, ArrayRef<NamedAttribute> namedAttrs", [{
state.addOperands(ptr);
state.addOperands(value);
state.addAttributes(namedAttrs);
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.h b/mlir/include/mlir/Dialect/StandardOps/Ops.h
index 1b1cf02d204..563116823d9 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.h
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.h
@@ -182,15 +182,15 @@ class DmaStartOp
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *srcMemRef,
- ValueRange srcIndices, Value *destMemRef,
- ValueRange destIndices, Value *numElements,
- Value *tagMemRef, ValueRange tagIndices,
- Value *stride = nullptr,
- Value *elementsPerStride = nullptr);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr srcMemRef, ValueRange srcIndices,
+ ValuePtr destMemRef, ValueRange destIndices,
+ ValuePtr numElements, ValuePtr tagMemRef,
+ ValueRange tagIndices, ValuePtr stride = nullptr,
+ ValuePtr elementsPerStride = nullptr);
// Returns the source MemRefType for this DMA operation.
- Value *getSrcMemRef() { return getOperand(0); }
+ ValuePtr getSrcMemRef() { return getOperand(0); }
// Returns the rank (number of indices) of the source MemRefType.
unsigned getSrcMemRefRank() {
return getSrcMemRef()->getType().cast<MemRefType>().getRank();
@@ -202,7 +202,7 @@ public:
}
// Returns the destination MemRefType for this DMA operation.
- Value *getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
+ ValuePtr getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
// Returns the rank (number of indices) of the destination MemRefType.
unsigned getDstMemRefRank() {
return getDstMemRef()->getType().cast<MemRefType>().getRank();
@@ -222,12 +222,12 @@ public:
}
// Returns the number of elements being transferred by this DMA operation.
- Value *getNumElements() {
+ ValuePtr getNumElements() {
return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank());
}
// Returns the Tag MemRef for this DMA operation.
- Value *getTagMemRef() {
+ ValuePtr getTagMemRef() {
return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1);
}
// Returns the rank (number of indices) of the tag MemRefType.
@@ -276,13 +276,13 @@ public:
1 + 1 + getTagMemRefRank();
}
- Value *getStride() {
+ ValuePtr getStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1 - 1);
}
- Value *getNumElementsPerStride() {
+ ValuePtr getNumElementsPerStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1);
@@ -307,13 +307,14 @@ class DmaWaitOp
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *tagMemRef,
- ValueRange tagIndices, Value *numElements);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr tagMemRef, ValueRange tagIndices,
+ ValuePtr numElements);
static StringRef getOperationName() { return "std.dma_wait"; }
// Returns the Tag MemRef associated with the DMA operation being waited on.
- Value *getTagMemRef() { return getOperand(0); }
+ ValuePtr getTagMemRef() { return getOperand(0); }
// Returns the tag memref index for this DMA operation.
operand_range getTagIndices() {
@@ -327,7 +328,7 @@ public:
}
// Returns the number of elements transferred in the associated DMA operation.
- Value *getNumElements() { return getOperand(1 + getTagMemRefRank()); }
+ ValuePtr getNumElements() { return getOperand(1 + getTagMemRefRank()); }
static ParseResult parse(OpAsmParser &parser, OperationState &result);
void print(OpAsmPrinter &p);
@@ -342,7 +343,7 @@ void printDimAndSymbolList(Operation::operand_iterator begin,
/// Parses dimension and symbol list and returns true if parsing failed.
ParseResult parseDimAndSymbolList(OpAsmParser &parser,
- SmallVectorImpl<Value *> &operands,
+ SmallVectorImpl<ValuePtr> &operands,
unsigned &numDims);
raw_ostream &operator<<(raw_ostream &os, SubViewOp::Range &range);
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.td b/mlir/include/mlir/Dialect/StandardOps/Ops.td
index c26baf6a76e..e00674708f6 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.td
@@ -52,7 +52,7 @@ class CastOp<string mnemonic, list<OpTrait> traits = []> :
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr source, Type destType", [{
impl::buildCastOp(builder, result, source, destType);
}]>];
@@ -191,7 +191,7 @@ def AllocOp : Std_Op<"alloc"> {
}]>,
OpBuilder<
"Builder *builder, OperationState &result, MemRefType memrefType, " #
- "ArrayRef<Value*> operands, IntegerAttr alignment = IntegerAttr()", [{
+ "ArrayRef<ValuePtr> operands, IntegerAttr alignment = IntegerAttr()", [{
result.addOperands(operands);
result.types.push_back(memrefType);
if (alignment)
@@ -330,7 +330,7 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> {
let results = (outs Variadic<AnyType>);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *callee,"
+ "Builder *, OperationState &result, ValuePtr callee,"
"ValueRange operands = {}", [{
result.operands.push_back(callee);
result.addOperands(operands);
@@ -338,7 +338,7 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> {
}]>];
let extraClassDeclaration = [{
- Value *getCallee() { return getOperand(0); }
+ ValuePtr getCallee() { return getOperand(0); }
/// Get the argument operands to the called function.
operand_range getArgOperands() {
@@ -395,7 +395,7 @@ def CmpFOp : Std_Op<"cmpf",
let builders = [OpBuilder<
"Builder *builder, OperationState &result, CmpFPredicate predicate,"
- "Value *lhs, Value *rhs", [{
+ "ValuePtr lhs, ValuePtr rhs", [{
::buildCmpFOp(builder, result, predicate, lhs, rhs);
}]>];
@@ -463,7 +463,7 @@ def CmpIOp : Std_Op<"cmpi",
let builders = [OpBuilder<
"Builder *builder, OperationState &result, CmpIPredicate predicate,"
- "Value *lhs, Value *rhs", [{
+ "ValuePtr lhs, ValuePtr rhs", [{
::buildCmpIOp(builder, result, predicate, lhs, rhs);
}]>];
@@ -502,7 +502,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
let arguments = (ins I1:$condition, Variadic<AnyType>:$branchOperands);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *condition,"
+ "Builder *, OperationState &result, ValuePtr condition,"
"Block *trueDest, ValueRange trueOperands,"
"Block *falseDest, ValueRange falseOperands", [{
result.addOperands(condition);
@@ -518,7 +518,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
enum { trueIndex = 0, falseIndex = 1 };
// The condition operand is the first operand in the list.
- Value *getCondition() { return getOperand(0); }
+ ValuePtr getCondition() { return getOperand(0); }
/// Return the destination if the condition is true.
Block *getTrueDest() {
@@ -531,12 +531,12 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
}
// Accessors for operands to the 'true' destination.
- Value *getTrueOperand(unsigned idx) {
+ ValuePtr getTrueOperand(unsigned idx) {
assert(idx < getNumTrueOperands());
return getOperand(getTrueDestOperandIndex() + idx);
}
- void setTrueOperand(unsigned idx, Value *value) {
+ void setTrueOperand(unsigned idx, ValuePtr value) {
assert(idx < getNumTrueOperands());
setOperand(getTrueDestOperandIndex() + idx, value);
}
@@ -561,11 +561,11 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
}
// Accessors for operands to the 'false' destination.
- Value *getFalseOperand(unsigned idx) {
+ ValuePtr getFalseOperand(unsigned idx) {
assert(idx < getNumFalseOperands());
return getOperand(getFalseDestOperandIndex() + idx);
}
- void setFalseOperand(unsigned idx, Value *value) {
+ void setFalseOperand(unsigned idx, ValuePtr value) {
assert(idx < getNumFalseOperands());
setOperand(getFalseDestOperandIndex() + idx, value);
}
@@ -678,7 +678,7 @@ def DimOp : Std_Op<"dim", [NoSideEffect]> {
let results = (outs Index);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memrefOrTensor,"
+ "Builder *builder, OperationState &result, ValuePtr memrefOrTensor,"
"unsigned index", [{
auto indexType = builder->getIndexType();
auto indexAttr = builder->getIntegerAttr(indexType, index);
@@ -730,7 +730,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> {
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *aggregate,"
+ "Builder *builder, OperationState &result, ValuePtr aggregate,"
"ValueRange indices = {}", [{
auto resType = aggregate->getType().cast<ShapedType>()
.getElementType();
@@ -738,7 +738,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> {
}]>];
let extraClassDeclaration = [{
- Value *getAggregate() { return getOperand(0); }
+ ValuePtr getAggregate() { return getOperand(0); }
operand_range getIndices() {
return {operand_begin() + 1, operand_end()};
@@ -816,7 +816,7 @@ def LoadOp : Std_Op<"load"> {
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *memref,"
+ "Builder *, OperationState &result, ValuePtr memref,"
"ValueRange indices = {}", [{
auto memrefType = memref->getType().cast<MemRefType>();
result.addOperands(memref);
@@ -825,8 +825,8 @@ def LoadOp : Std_Op<"load"> {
}]>];
let extraClassDeclaration = [{
- Value *getMemRef() { return getOperand(0); }
- void setMemRef(Value *value) { setOperand(0, value); }
+ ValuePtr getMemRef() { return getOperand(0); }
+ void setMemRef(ValuePtr value) { setOperand(0, value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
@@ -952,8 +952,8 @@ def PrefetchOp : Std_Op<"prefetch"> {
BoolAttr:$isDataCache);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memref,"
- "ArrayRef<Value *> indices, bool isWrite, unsigned hint, bool isData",
+ "Builder *builder, OperationState &result, ValuePtr memref,"
+ "ArrayRef<ValuePtr> indices, bool isWrite, unsigned hint, bool isData",
[{
auto hintAttr = builder->getI32IntegerAttr(hint);
auto isWriteAttr = builder->getBoolAttr(isWrite);
@@ -990,7 +990,7 @@ def RankOp : Std_Op<"rank", [NoSideEffect]> {
let verifier = ?;
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *tensor", [{
+ "Builder *builder, OperationState &result, ValuePtr tensor", [{
auto indexType = builder->getIndexType();
build(builder, result, indexType, tensor);
}]>];
@@ -1052,16 +1052,16 @@ def SelectOp : Std_Op<"select", [NoSideEffect, SameOperandsAndResultShape]> {
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *condition,"
- "Value *trueValue, Value *falseValue", [{
+ "Builder *builder, OperationState &result, ValuePtr condition,"
+ "ValuePtr trueValue, ValuePtr falseValue", [{
result.addOperands({condition, trueValue, falseValue});
result.addTypes(trueValue->getType());
}]>];
let extraClassDeclaration = [{
- Value *getCondition() { return condition(); }
- Value *getTrueValue() { return true_value(); }
- Value *getFalseValue() { return false_value(); }
+ ValuePtr getCondition() { return condition(); }
+ ValuePtr getTrueValue() { return true_value(); }
+ ValuePtr getFalseValue() { return false_value(); }
}];
let hasFolder = 1;
@@ -1089,7 +1089,7 @@ def SignExtendIOp : Std_Op<"sexti",
let results = (outs IntegerLike);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *value, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{
result.addOperands(value);
result.addTypes(destType);
}]>];
@@ -1189,7 +1189,7 @@ def SplatOp : Std_Op<"splat", [NoSideEffect]> {
let results = (outs AnyTypeOf<[AnyVector, AnyStaticShapeTensor]>:$aggregate);
let builders =
- [OpBuilder<"Builder *builder, OperationState &result, Value *element, "
+ [OpBuilder<"Builder *builder, OperationState &result, ValuePtr element, "
"Type aggregateType",
[{ build(builder, result, aggregateType, element); }]>];
@@ -1213,16 +1213,16 @@ def StoreOp : Std_Op<"store"> {
Variadic<Index>:$indices);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *valueToStore, Value *memref", [{
+ "Builder *, OperationState &result, ValuePtr valueToStore, ValuePtr memref", [{
result.addOperands(valueToStore);
result.addOperands(memref);
}]>];
let extraClassDeclaration = [{
- Value *getValueToStore() { return getOperand(0); }
+ ValuePtr getValueToStore() { return getOperand(0); }
- Value *getMemRef() { return getOperand(1); }
- void setMemRef(Value *value) { setOperand(1, value); }
+ ValuePtr getMemRef() { return getOperand(1); }
+ void setMemRef(ValuePtr value) { setOperand(1, value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
@@ -1364,13 +1364,13 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
let builders = [
OpBuilder<
- "Builder *b, OperationState &result, Value *source, "
+ "Builder *b, OperationState &result, ValuePtr source, "
"ValueRange offsets, ValueRange sizes, "
"ValueRange strides, Type resultType = Type(), "
"ArrayRef<NamedAttribute> attrs = {}">,
OpBuilder<
"Builder *builder, OperationState &result, "
- "Type resultType, Value *source">
+ "Type resultType, ValuePtr source">
];
let extraClassDeclaration = [{
@@ -1403,7 +1403,7 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
// offset, size and stride operands of the SubViewOp into a list of triples.
// Such a list of triple is sometimes more convenient to manipulate.
struct Range {
- Value *offset, *size, *stride;
+ ValuePtr offset, size, stride;
};
SmallVector<Range, 8> getRanges();
}];
@@ -1465,7 +1465,7 @@ def TensorLoadOp : Std_Op<"tensor_load",
let verifier = ?;
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memref", [{
+ "Builder *builder, OperationState &result, ValuePtr memref", [{
auto memrefType = memref->getType().cast<MemRefType>();
auto resultType = RankedTensorType::get(memrefType.getShape(),
memrefType.getElementType());
@@ -1519,7 +1519,7 @@ def TruncateIOp : Std_Op<"trunci", [NoSideEffect, SameOperandsAndResultShape]> {
let results = (outs IntegerLike);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *value, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{
result.addOperands(value);
result.addTypes(destType);
}]>];
@@ -1578,7 +1578,7 @@ def ViewOp : Std_Op<"view", [NoSideEffect]> {
/// Returns the dynamic offset for this view operation if specified.
/// Returns nullptr if no dynamic offset was specified.
- Value *getDynamicOffset();
+ ValuePtr getDynamicOffset();
/// Returns the starting operand list position of the dynamic size operands.
unsigned getDynamicSizesOperandStart() {
@@ -1619,7 +1619,7 @@ def ZeroExtendIOp : Std_Op<"zexti", [NoSideEffect, SameOperandsAndResultShape]>
let results = (outs IntegerLike);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *value, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{
result.addOperands(value);
result.addTypes(destType);
}]>];
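(A hedged sketch of two standard-dialect builders after the rename; it assumes an OpBuilder `b`, a Location `loc`, and a ranked memref value, with an illustrative helper name.)

#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Computes dim(%m, 0) + dim(%m, 1); the DimOp builder and the generated
// binary-op builders now accept ValuePtr operands.
static ValuePtr addLeadingDims(OpBuilder &b, Location loc, ValuePtr memref) {
  ValuePtr d0 = b.create<DimOp>(loc, memref, 0);
  ValuePtr d1 = b.create<DimOp>(loc, memref, 1);
  return b.create<AddIOp>(loc, d0, d1);
}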
diff --git a/mlir/include/mlir/Dialect/VectorOps/Utils.h b/mlir/include/mlir/Dialect/VectorOps/Utils.h
index f61a813855d..68c62cc7ec7 100644
--- a/mlir/include/mlir/Dialect/VectorOps/Utils.h
+++ b/mlir/include/mlir/Dialect/VectorOps/Utils.h
@@ -34,6 +34,9 @@ class Operation;
class Value;
class VectorType;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Computes and returns the multi-dimensional ratio of `superShape` to
/// `subShape`. This is calculated by performing a traversal from minor to major
/// dimensions (i.e. in reverse shape order). If integral division is not
@@ -122,7 +125,7 @@ Optional<SmallVector<int64_t, 4>> shapeRatio(VectorType superVectorType,
/// `%arg0[%c0, %c0]` into vector<128xf32> which needs a 1-D vector broadcast.
///
AffineMap
-makePermutationMap(Operation *op, ArrayRef<Value *> indices,
+makePermutationMap(Operation *op, ArrayRef<ValuePtr> indices,
const DenseMap<Operation *, unsigned> &loopToVectorDim);
namespace matcher {
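(The hunk above adds the alias locally because this header only forward-declares Value; the same trick in isolation, with a stand-in class instead of mlir::Value, looks like the fully self-contained sketch below.)

#include <cassert>

namespace demo {
class Value {}; // stand-in for mlir::Value, which is only forward-declared here

// Temporary alias: spell the pointer as a named type so call sites can later
// switch to a value-typed Value without touching every signature again.
using ValuePtr = Value *;

ValuePtr identity(ValuePtr v) { return v; }
} // namespace demo

int main() {
  demo::Value v;
  assert(demo::identity(&v) == &v);
  return 0;
}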
diff --git a/mlir/include/mlir/Dialect/VectorOps/VectorOps.td b/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
index 5fd19498350..94262e6f1ff 100644
--- a/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
+++ b/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
@@ -128,8 +128,8 @@ def Vector_ContractionOp :
: vector<7x8x16x15xf32>, vector<8x16x7x5xf32> into vector<8x15x8x5xf32>
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *lhs, Value *rhs, "
- "Value *acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">];
+ "Builder *builder, OperationState &result, ValuePtr lhs, ValuePtr rhs, "
+ "ValuePtr acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">];
let extraClassDeclaration = [{
VectorType getLhsType() {
return lhs()->getType().cast<VectorType>();
@@ -252,7 +252,8 @@ def Vector_ShuffleOp :
```
}];
- let builders = [OpBuilder<"Builder *builder, OperationState &result, Value *v1, Value *v2, ArrayRef<int64_t>">];
+ let builders = [OpBuilder<"Builder *builder, OperationState &result,"
+ "ValuePtr v1, ValuePtr v2, ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getMaskAttrName() { return "mask"; }
VectorType getV1VectorType() {
@@ -312,7 +313,8 @@ def Vector_ExtractOp :
```
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, ArrayRef<int64_t>">];
+ "Builder *builder, OperationState &result, ValuePtr source,"
+ "ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getPositionAttrName() { return "position"; }
VectorType getVectorType() {
@@ -357,7 +359,7 @@ def Vector_ExtractSlicesOp :
}];
let builders = [OpBuilder<
"Builder *builder, OperationState &result, TupleType tupleType, " #
- "Value *vector, ArrayRef<int64_t> sizes, " #
+ "ValuePtr vector, ArrayRef<int64_t> sizes, " #
"ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
VectorType getSourceVectorType() {
@@ -428,8 +430,8 @@ def Vector_InsertOp :
```
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, " #
- "Value *dest, ArrayRef<int64_t>">];
+ "Builder *builder, OperationState &result, ValuePtr source, " #
+ "ValuePtr dest, ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getPositionAttrName() { return "position"; }
Type getSourceType() { return source()->getType(); }
@@ -521,7 +523,7 @@ def Vector_InsertStridedSliceOp :
```
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, Value *dest, " #
+ "Builder *builder, OperationState &result, ValuePtr source, ValuePtr dest, " #
"ArrayRef<int64_t> offsets, ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
static StringRef getOffsetsAttrName() { return "offsets"; }
@@ -723,7 +725,7 @@ def Vector_StridedSliceOp :
vector<4x8x16xf32> to vector<2x4x16xf32>
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, " #
+ "Builder *builder, OperationState &result, ValuePtr source, " #
"ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes, " #
"ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
@@ -975,7 +977,7 @@ def Vector_TypeCastOp :
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source">];
+ "Builder *builder, OperationState &result, ValuePtr source">];
let parser = [{
return impl::parseCastOp(parser, result);
diff --git a/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h b/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h
index 2c2e4e7c4fa..b48cb51533f 100644
--- a/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h
+++ b/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h
@@ -73,8 +73,9 @@ namespace vector {
//
// This will be extended in the future to support more advanced use cases than
// simple pointwise ops.
-Value *unrollSingleResultOpMatchingType(PatternRewriter &builder, Operation *op,
- ArrayRef<int64_t> targetShape);
+ValuePtr unrollSingleResultOpMatchingType(PatternRewriter &builder,
+ Operation *op,
+ ArrayRef<int64_t> targetShape);
} // namespace vector
} // namespace mlir
diff --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index 69c72a50870..11ee0bff342 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -152,7 +152,7 @@ private:
/// A LoopBuilder is a generic NestedBuilder for loop-like MLIR operations.
/// More specifically it is meant to be used as a temporary object for
-/// representing any nested MLIR construct that is "related to" an mlir::Value*
+/// representing any nested MLIR construct that is "related to" an mlir::Value
/// (for now an induction variable).
/// This is extensible and will evolve in the future as MLIR evolves, hence
/// the name LoopBuilder (as opposed to say ForBuilder or AffineForBuilder).
@@ -242,7 +242,7 @@ class Append {};
/// A BlockBuilder is a NestedBuilder for mlir::Block*.
/// This exists by opposition to LoopBuilder which is not related to an
-/// mlir::Block* but to a mlir::Value*.
+/// mlir::Block* but to a mlir::Value.
/// It is meant to be used as a temporary object for representing any nested
/// MLIR construct that is "related to" an mlir::Block*.
class BlockBuilder : public NestedBuilder {
@@ -257,7 +257,7 @@ public:
///
/// Prerequisites:
/// The ValueHandle `args` are typed delayed ValueHandles; i.e. they are
- /// not yet bound to mlir::Value*.
+ /// not yet bound to mlir::Value.
BlockBuilder(BlockHandle *bh, ArrayRef<ValueHandle *> args);
/// The only purpose of this operator is to serve as a sequence point so that
@@ -291,10 +291,10 @@ protected:
/// typed "delayed" value that can be hold a Value in the future;
/// 3. constructed state,in which case it holds a Value.
///
-/// A ValueHandle is meant to capture a single Value* and should be used for
+/// A ValueHandle is meant to capture a single Value and should be used for
/// operations that have a single result. For convenience of use, we also
/// include AffineForOp in this category although it does not return a value.
-/// In the case of AffineForOp, the captured Value* is the loop induction
+/// In the case of AffineForOp, the captured Value is the loop induction
/// variable.
class ValueHandle : public CapturableHandle {
public:
@@ -304,15 +304,15 @@ public:
/// A ValueHandle that is constructed from a Type represents a typed "delayed"
/// Value. A delayed Value can only capture Values of the specified type.
/// Such a delayed value represents the declaration (in the PL sense) of a
- /// placeholder for an mlir::Value* that will be constructed and captured at
+ /// placeholder for an mlir::Value that will be constructed and captured at
/// some later point in the program.
explicit ValueHandle(Type t) : t(t), v(nullptr) {}
- /// A ValueHandle that is constructed from an mlir::Value* is an "eager"
+ /// A ValueHandle that is constructed from an mlir::Value is an "eager"
/// Value. An eager Value represents both the declaration and the definition
- /// (in the PL sense) of a placeholder for an mlir::Value* that has already
+ /// (in the PL sense) of a placeholder for an mlir::Value that has already
/// been constructed in the past and that is captured "now" in the program.
- explicit ValueHandle(Value *v) : t(v->getType()), v(v) {}
+ explicit ValueHandle(ValuePtr v) : t(v->getType()), v(v) {}
/// Builds a ConstantIndexOp of value `cst`. The constant is created at the
/// current insertion point.
@@ -336,8 +336,8 @@ public:
std::swap(v, other.v);
}
- /// Implicit conversion useful for automatic conversion to Container<Value*>.
- operator Value *() const { return getValue(); }
+ /// Implicit conversion useful for automatic conversion to Container<Value>.
+ operator ValuePtr() const { return getValue(); }
/// Generic mlir::Op create. This is the key to being extensible to the whole
/// of MLIR without duplicating the type system or the op definitions.
@@ -355,7 +355,7 @@ public:
/// Special case to build composed AffineApply operations.
// TODO: createOrFold when available and move inside of the `create` method.
static ValueHandle createComposedAffineApply(AffineMap map,
- ArrayRef<Value *> operands);
+ ArrayRef<ValuePtr> operands);
/// Generic create for a named operation producing a single value.
static ValueHandle create(StringRef name, ArrayRef<ValueHandle> operands,
@@ -363,7 +363,7 @@ public:
ArrayRef<NamedAttribute> attributes = {});
bool hasValue() const { return v != nullptr; }
- Value *getValue() const {
+ ValuePtr getValue() const {
assert(hasValue() && "Unexpected null value;");
return v;
}
@@ -380,12 +380,12 @@ protected:
ValueHandle() : t(), v(nullptr) {}
Type t;
- Value *v;
+ ValuePtr v;
};
/// An OperationHandle can be used in lieu of ValueHandle to capture the
/// operation in cases when one does not care about, or cannot extract, a
-/// unique Value* from the operation.
+/// unique Value from the operation.
/// This can be used for capturing zero result operations as well as
/// multi-result operations that are not supported by ValueHandle.
/// We do not distinguish further between zero and multi-result operations at
@@ -529,7 +529,7 @@ ValueHandle operator>=(ValueHandle lhs, ValueHandle rhs);
} // namespace op
-/// Entry point to build multiple ValueHandle from a `Container` of Value* or
+/// Entry point to build multiple ValueHandle from a `Container` of Value or
/// Type.
template <typename Container>
inline SmallVector<ValueHandle, 8> makeValueHandles(Container values) {
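(A hedged sketch of the updated ValueHandle conversions; it assumes an existing SSA value and uses an illustrative function name.)

#include "mlir/EDSC/Builders.h"

using namespace mlir;
using namespace mlir::edsc;

// Eagerly capture a value, then hand it back through the implicit
// `operator ValuePtr()` declared above.
static ValuePtr roundTrip(ValuePtr v) {
  ValueHandle handle(v); // eager: declaration and definition in one step
  return handle;
}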
diff --git a/mlir/include/mlir/EDSC/Helpers.h b/mlir/include/mlir/EDSC/Helpers.h
index 423c92b2d06..c18307e7121 100644
--- a/mlir/include/mlir/EDSC/Helpers.h
+++ b/mlir/include/mlir/EDSC/Helpers.h
@@ -75,7 +75,7 @@ protected:
// TODO(ntv): Support MemRefs with layoutMaps.
class MemRefView : public View {
public:
- explicit MemRefView(Value *v);
+ explicit MemRefView(ValuePtr v);
MemRefView(const MemRefView &) = default;
MemRefView &operator=(const MemRefView &) = default;
@@ -91,7 +91,7 @@ private:
/// a MemRefView but for vectors. This exists purely for boilerplate avoidance.
class VectorView : public View {
public:
- explicit VectorView(Value *v);
+ explicit VectorView(ValuePtr v);
VectorView(const VectorView &) = default;
VectorView &operator=(const VectorView &) = default;
@@ -120,7 +120,7 @@ private:
template <typename Load, typename Store> class TemplatedIndexedValue {
public:
explicit TemplatedIndexedValue(Type t) : base(t) {}
- explicit TemplatedIndexedValue(Value *v)
+ explicit TemplatedIndexedValue(ValuePtr v)
: TemplatedIndexedValue(ValueHandle(v)) {}
explicit TemplatedIndexedValue(ValueHandle v) : base(v) {}
@@ -161,8 +161,8 @@ public:
return Load(getBase(), {indices.begin(), indices.end()});
}
- /// Emits a `load` when converting to a Value*.
- Value *operator*(void)const {
+ /// Emits a `load` when converting to a Value.
+ ValuePtr operator*(void) const {
return Load(getBase(), {indices.begin(), indices.end()}).getValue();
}
diff --git a/mlir/include/mlir/EDSC/Intrinsics.h b/mlir/include/mlir/EDSC/Intrinsics.h
index 06c75505cb7..dc0c1186c7a 100644
--- a/mlir/include/mlir/EDSC/Intrinsics.h
+++ b/mlir/include/mlir/EDSC/Intrinsics.h
@@ -44,7 +44,7 @@ struct IndexHandle : public ValueHandle {
explicit IndexHandle()
: ValueHandle(ScopedContext::getBuilder().getIndexType()) {}
explicit IndexHandle(index_t v) : ValueHandle(v) {}
- explicit IndexHandle(Value *v) : ValueHandle(v) {
+ explicit IndexHandle(ValuePtr v) : ValueHandle(v) {
assert(v->getType() == ScopedContext::getBuilder().getIndexType() &&
"Expected index type");
}
@@ -79,9 +79,9 @@ makeHandlePointers(MutableArrayRef<T> ivs) {
return pivs;
}
-/// Returns a vector of the underlying Value* from `ivs`.
-inline SmallVector<Value *, 8> extractValues(ArrayRef<IndexHandle> ivs) {
- SmallVector<Value *, 8> vals;
+/// Returns a vector of the underlying Value from `ivs`.
+inline SmallVector<ValuePtr, 8> extractValues(ArrayRef<IndexHandle> ivs) {
+ SmallVector<ValuePtr, 8> vals;
vals.reserve(ivs.size());
for (auto &iv : ivs) {
vals.push_back(iv.getValue());
@@ -96,7 +96,7 @@ namespace intrinsics {
namespace detail {
/// Helper structure to be used with ValueBuilder / OperationBuilder.
/// It serves the purpose of removing boilerplate specialization for the sole
-/// purpose of implicitly converting ArrayRef<ValueHandle> -> ArrayRef<Value*>.
+/// purpose of implicitly converting ArrayRef<ValueHandle> -> ArrayRef<Value>.
class ValueHandleArray {
public:
ValueHandleArray(ArrayRef<ValueHandle> vals) {
@@ -109,11 +109,11 @@ public:
SmallVector<IndexHandle, 8> tmp(vals.begin(), vals.end());
values.append(tmp.begin(), tmp.end());
}
- operator ArrayRef<Value *>() { return values; }
+ operator ArrayRef<ValuePtr>() { return values; }
private:
ValueHandleArray() = default;
- SmallVector<Value *, 8> values;
+ SmallVector<ValuePtr, 8> values;
};
template <typename T> inline T unpack(T value) { return value; }
@@ -128,8 +128,8 @@ inline detail::ValueHandleArray unpack(ArrayRef<ValueHandle> values) {
/// boilerplate or Tablegen.
/// Arguably a builder is not a ValueHandle but in practice it is only used as
/// an alias to a notional ValueHandle<Op>.
-/// Implementing it as a subclass allows it to compose all the way to Value*.
-/// Without subclassing, implicit conversion to Value* would fail when composing
+/// Implementing it as a subclass allows it to compose all the way to Value.
+/// Without subclassing, implicit conversion to Value would fail when composing
/// in patterns such as: `select(a, b, select(c, d, e))`.
template <typename Op> struct ValueBuilder : public ValueHandle {
// Builder-based
@@ -238,8 +238,8 @@ OperationHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
///
/// Prerequisites:
/// `b` has not yet captured an mlir::Block*.
-/// No `captures` have captured any mlir::Value*.
-/// All `operands` have already captured an mlir::Value*
+/// No `captures` have captured any mlir::Value.
+/// All `operands` have already captured an mlir::Value
/// captures.size() == operands.size()
/// captures and operands are pairwise of the same type.
OperationHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
@@ -266,8 +266,8 @@ OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
///
/// Prerequisites:
/// `trueBranch`/`falseBranch` has not yet captured an mlir::Block*.
-/// No `trueCaptures`/`falseCaptures` have captured any mlir::Value*.
-/// All `trueOperands`/`trueOperands` have already captured an mlir::Value*
+/// No `trueCaptures`/`falseCaptures` have captured any mlir::Value.
+/// All `trueOperands`/`trueOperands` have already captured an mlir::Value
/// `trueCaptures`.size() == `trueOperands`.size()
/// `falseCaptures`.size() == `falseOperands`.size()
/// `trueCaptures` and `trueOperands` are pairwise of the same type
diff --git a/mlir/include/mlir/IR/Block.h b/mlir/include/mlir/IR/Block.h
index 6c5099b06da..87c77160e1d 100644
--- a/mlir/include/mlir/IR/Block.h
+++ b/mlir/include/mlir/IR/Block.h
@@ -72,7 +72,7 @@ public:
//===--------------------------------------------------------------------===//
// This is the list of arguments to the block.
- using BlockArgListType = ArrayRef<BlockArgument *>;
+ using BlockArgListType = ArrayRef<BlockArgumentPtr>;
BlockArgListType getArguments() { return arguments; }
@@ -86,7 +86,7 @@ public:
bool args_empty() { return arguments.empty(); }
/// Add one value to the argument list.
- BlockArgument *addArgument(Type type);
+ BlockArgumentPtr addArgument(Type type);
/// Add one argument to the argument list for each type specified in the list.
iterator_range<args_iterator> addArguments(ArrayRef<Type> types);
@@ -97,7 +97,7 @@ public:
void eraseArgument(unsigned index, bool updatePredTerms = true);
unsigned getNumArguments() { return arguments.size(); }
- BlockArgument *getArgument(unsigned i) { return arguments[i]; }
+ BlockArgumentPtr getArgument(unsigned i) { return arguments[i]; }
//===--------------------------------------------------------------------===//
// Operation list management
@@ -332,7 +332,7 @@ private:
OpListType operations;
/// This is the list of arguments to the block.
- std::vector<BlockArgument *> arguments;
+ std::vector<BlockArgumentPtr> arguments;
Block(Block &) = delete;
void operator=(Block &) = delete;
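(A hedged sketch of the pointer-typed block-argument accessors; it assumes an existing Block and Builder, and the helper name is illustrative.)

#include <cassert>

#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// addArgument and getArgument now traffic in BlockArgumentPtr
// (an alias for BlockArgument *).
static BlockArgumentPtr appendIndexArgument(Block &block, Builder &builder) {
  BlockArgumentPtr arg = block.addArgument(builder.getIndexType());
  assert(block.getArgument(block.getNumArguments() - 1) == arg);
  return arg;
}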
diff --git a/mlir/include/mlir/IR/BlockAndValueMapping.h b/mlir/include/mlir/IR/BlockAndValueMapping.h
index cd15d457a77..287dd508fa6 100644
--- a/mlir/include/mlir/IR/BlockAndValueMapping.h
+++ b/mlir/include/mlir/IR/BlockAndValueMapping.h
@@ -37,7 +37,7 @@ public:
/// Inserts a new mapping for 'from' to 'to'. If there is an existing mapping,
/// it is overwritten.
void map(Block *from, Block *to) { valueMap[from] = to; }
- void map(Value *from, Value *to) { valueMap[from] = to; }
+ void map(ValuePtr from, ValuePtr to) { valueMap[from] = to; }
/// Erases a mapping for 'from'.
void erase(IRObjectWithUseList *from) { valueMap.erase(from); }
@@ -52,8 +52,8 @@ public:
Block *lookupOrNull(Block *from) const {
return lookupOrValue(from, (Block *)nullptr);
}
- Value *lookupOrNull(Value *from) const {
- return lookupOrValue(from, (Value *)nullptr);
+ ValuePtr lookupOrNull(ValuePtr from) const {
+ return lookupOrValue(from, (ValuePtr) nullptr);
}
/// Lookup a mapped value within the map. If a mapping for the provided value
@@ -61,7 +61,7 @@ public:
Block *lookupOrDefault(Block *from) const {
return lookupOrValue(from, from);
}
- Value *lookupOrDefault(Value *from) const {
+ ValuePtr lookupOrDefault(ValuePtr from) const {
return lookupOrValue(from, from);
}
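(A hedged sketch of remapping values with the pointer-typed API above; the two existing values and the helper name are illustrative.)

#include "mlir/IR/BlockAndValueMapping.h"

using namespace mlir;

// Map one value onto another and resolve a query, falling back to the query
// itself when no mapping exists.
static ValuePtr remapOrKeep(ValuePtr oldVal, ValuePtr newVal, ValuePtr query) {
  BlockAndValueMapping mapping;
  mapping.map(oldVal, newVal);
  return mapping.lookupOrDefault(query);
}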
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 766902fabfa..c199c09feb5 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -313,7 +313,7 @@ public:
/// and immediately try to fold it. This functions populates 'results' with
/// the results after folding the operation.
template <typename OpTy, typename... Args>
- void createOrFold(SmallVectorImpl<Value *> &results, Location location,
+ void createOrFold(SmallVectorImpl<ValuePtr> &results, Location location,
Args &&... args) {
// Create the operation without using 'createOperation' as we don't want to
// insert it yet.
@@ -331,9 +331,9 @@ public:
/// Overload to create or fold a single result operation.
template <typename OpTy, typename... Args>
typename std::enable_if<OpTy::template hasTrait<OpTrait::OneResult>(),
- Value *>::type
+ ValuePtr>::type
createOrFold(Location location, Args &&... args) {
- SmallVector<Value *, 1> results;
+ SmallVector<ValuePtr, 1> results;
createOrFold<OpTy>(results, location, std::forward<Args>(args)...);
return results.front();
}
@@ -344,7 +344,7 @@ public:
OpTy>::type
createOrFold(Location location, Args &&... args) {
auto op = create<OpTy>(location, std::forward<Args>(args)...);
- SmallVector<Value *, 0> unused;
+ SmallVector<ValuePtr, 0> unused;
tryFold(op.getOperation(), unused);
// Folding cannot remove a zero-result operation, so for convenience we
@@ -355,7 +355,7 @@ public:
/// Attempts to fold the given operation and places new results within
/// 'results'. Returns success if the operation was folded, failure otherwise.
/// Note: This function does not erase the operation on a successful fold.
- LogicalResult tryFold(Operation *op, SmallVectorImpl<Value *> &results);
+ LogicalResult tryFold(Operation *op, SmallVectorImpl<ValuePtr> &results);
/// Creates a deep copy of the specified operation, remapping any operands
/// that use values outside of the operation using the map that is provided
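(A hedged sketch of the single-result createOrFold overload whose return type changes above; it assumes an OpBuilder `b` and two integer-typed values, and the helper name is illustrative.)

#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// For single-result ops, createOrFold now yields a ValuePtr directly: either
// the folded value or the result of the newly created std.addi.
static ValuePtr createSum(OpBuilder &b, Location loc, ValuePtr x, ValuePtr y) {
  return b.createOrFold<AddIOp>(loc, x, y);
}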
diff --git a/mlir/include/mlir/IR/FunctionSupport.h b/mlir/include/mlir/IR/FunctionSupport.h
index b15b056a3ec..1ba85d73df9 100644
--- a/mlir/include/mlir/IR/FunctionSupport.h
+++ b/mlir/include/mlir/IR/FunctionSupport.h
@@ -183,7 +183,7 @@ public:
}
/// Gets argument.
- BlockArgument *getArgument(unsigned idx) {
+ BlockArgumentPtr getArgument(unsigned idx) {
return getBlocks().front().getArgument(idx);
}
diff --git a/mlir/include/mlir/IR/Matchers.h b/mlir/include/mlir/IR/Matchers.h
index 1261916dae2..3b36f2fb5eb 100644
--- a/mlir/include/mlir/IR/Matchers.h
+++ b/mlir/include/mlir/IR/Matchers.h
@@ -142,7 +142,7 @@ using has_operation_or_value_matcher_t =
/// Statically switch to a Value matcher.
template <typename MatcherClass>
typename std::enable_if_t<is_detected<detail::has_operation_or_value_matcher_t,
- MatcherClass, Value *>::value,
+ MatcherClass, ValuePtr>::value,
bool>
matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) {
return matcher.match(op->getOperand(idx));
@@ -161,14 +161,14 @@ matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) {
/// Terminal matcher, always returns true.
struct AnyValueMatcher {
- bool match(Value *op) const { return true; }
+ bool match(ValuePtr op) const { return true; }
};
/// Binds to a specific value and matches it.
struct PatternMatcherValue {
- PatternMatcherValue(Value *val) : value(val) {}
- bool match(Value *val) const { return val == value; }
- Value *value;
+ PatternMatcherValue(ValuePtr val) : value(val) {}
+ bool match(ValuePtr val) const { return val == value; }
+ ValuePtr value;
};
template <typename TupleT, class CallbackT, std::size_t... Is>
@@ -235,7 +235,7 @@ inline detail::constant_int_not_value_matcher<0> m_NonZero() {
/// Entry point for matching a pattern over a Value.
template <typename Pattern>
-inline bool matchPattern(Value *value, const Pattern &pattern) {
+inline bool matchPattern(ValuePtr value, const Pattern &pattern) {
// TODO: handle other cases
if (auto *op = value->getDefiningOp())
return const_cast<Pattern &>(pattern).match(op);
@@ -262,7 +262,7 @@ auto m_Op(Matchers... matchers) {
namespace matchers {
inline auto m_Any() { return detail::AnyValueMatcher(); }
-inline auto m_Val(Value *v) { return detail::PatternMatcherValue(v); }
+inline auto m_Val(ValuePtr v) { return detail::PatternMatcherValue(v); }
} // namespace matchers
} // end namespace mlir
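(A hedged sketch of the value-based matcher entry point after the rename; the predicate name is illustrative.)

#include "mlir/IR/Matchers.h"

using namespace mlir;

// matchPattern now takes ValuePtr; m_NonZero() matches a value defined by an
// integer constant other than zero.
static bool isNonZeroConstant(ValuePtr v) {
  return matchPattern(v, m_NonZero());
}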
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index c220120b337..437540117c4 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -257,8 +257,8 @@ inline bool operator!=(OpState lhs, OpState rhs) {
}
/// This class represents a single result from folding an operation.
-class OpFoldResult : public PointerUnion<Attribute, Value *> {
- using PointerUnion<Attribute, Value *>::PointerUnion;
+class OpFoldResult : public PointerUnion<Attribute, ValuePtr> {
+ using PointerUnion<Attribute, ValuePtr>::PointerUnion;
};
/// This template defines the foldHook as used by AbstractOperation.
@@ -311,8 +311,8 @@ class FoldingHook<ConcreteType, isSingleResult,
typename std::enable_if<isSingleResult>::type> {
public:
/// If the operation returns a single value, then the Op can be implicitly
- /// converted to an Value*. This yields the value of the only result.
- operator Value *() {
+ /// converted to an Value. This yields the value of the only result.
+ operator ValuePtr() {
return static_cast<ConcreteType *>(this)->getOperation()->getResult(0);
}
@@ -326,7 +326,7 @@ public:
// Check if the operation was folded in place. In this case, the operation
// returns itself.
- if (result.template dyn_cast<Value *>() != op->getResult(0))
+ if (result.template dyn_cast<ValuePtr>() != op->getResult(0))
results.push_back(result);
return success();
}
@@ -428,10 +428,12 @@ struct MultiOperandTraitBase : public TraitBase<ConcreteType, TraitType> {
unsigned getNumOperands() { return this->getOperation()->getNumOperands(); }
/// Return the operand at index 'i'.
- Value *getOperand(unsigned i) { return this->getOperation()->getOperand(i); }
+ ValuePtr getOperand(unsigned i) {
+ return this->getOperation()->getOperand(i);
+ }
/// Set the operand at index 'i' to 'value'.
- void setOperand(unsigned i, Value *value) {
+ void setOperand(unsigned i, ValuePtr value) {
this->getOperation()->setOperand(i, value);
}
@@ -475,9 +477,11 @@ private:
template <typename ConcreteType>
class OneOperand : public TraitBase<ConcreteType, OneOperand> {
public:
- Value *getOperand() { return this->getOperation()->getOperand(0); }
+ ValuePtr getOperand() { return this->getOperation()->getOperand(0); }
- void setOperand(Value *value) { this->getOperation()->setOperand(0, value); }
+ void setOperand(ValuePtr value) {
+ this->getOperation()->setOperand(0, value);
+ }
static LogicalResult verifyTrait(Operation *op) {
return impl::verifyOneOperand(op);
@@ -550,7 +554,7 @@ struct MultiResultTraitBase : public TraitBase<ConcreteType, TraitType> {
unsigned getNumResults() { return this->getOperation()->getNumResults(); }
/// Return the result at index 'i'.
- Value *getResult(unsigned i) { return this->getOperation()->getResult(i); }
+ ValuePtr getResult(unsigned i) { return this->getOperation()->getResult(i); }
/// Replace all uses of results of this operation with the provided 'values'.
/// 'values' may correspond to an existing operation, or a range of 'Value'.
@@ -586,13 +590,13 @@ struct MultiResultTraitBase : public TraitBase<ConcreteType, TraitType> {
template <typename ConcreteType>
class OneResult : public TraitBase<ConcreteType, OneResult> {
public:
- Value *getResult() { return this->getOperation()->getResult(0); }
+ ValuePtr getResult() { return this->getOperation()->getResult(0); }
Type getType() { return getResult()->getType(); }
/// Replace all uses of 'this' value with the new value, updating anything in
/// the IR that uses 'this' to use the other value instead. When this returns
/// there are zero uses of 'this'.
- void replaceAllUsesWith(Value *newValue) {
+ void replaceAllUsesWith(ValuePtr newValue) {
getResult()->replaceAllUsesWith(newValue);
}
@@ -820,10 +824,10 @@ public:
return this->getOperation()->setSuccessor(block, index);
}
- void addSuccessorOperand(unsigned index, Value *value) {
+ void addSuccessorOperand(unsigned index, ValuePtr value) {
return this->getOperation()->addSuccessorOperand(index, value);
}
- void addSuccessorOperands(unsigned index, ArrayRef<Value *> values) {
+ void addSuccessorOperands(unsigned index, ArrayRef<ValuePtr> values) {
return this->getOperation()->addSuccessorOperand(index, values);
}
};
@@ -1209,8 +1213,8 @@ namespace impl {
ParseResult parseOneResultOneOperandTypeOp(OpAsmParser &parser,
OperationState &result);
-void buildBinaryOp(Builder *builder, OperationState &result, Value *lhs,
- Value *rhs);
+void buildBinaryOp(Builder *builder, OperationState &result, ValuePtr lhs,
+ ValuePtr rhs);
ParseResult parseOneResultSameOperandTypeOp(OpAsmParser &parser,
OperationState &result);
@@ -1223,11 +1227,11 @@ void printOneResultOp(Operation *op, OpAsmPrinter &p);
// These functions are out-of-line implementations of the methods in CastOp,
// which avoids them being template instantiated/duplicated.
namespace impl {
-void buildCastOp(Builder *builder, OperationState &result, Value *source,
+void buildCastOp(Builder *builder, OperationState &result, ValuePtr source,
Type destType);
ParseResult parseCastOp(OpAsmParser &parser, OperationState &result);
void printCastOp(Operation *op, OpAsmPrinter &p);
-Value *foldCastOp(Operation *op);
+ValuePtr foldCastOp(Operation *op);
} // namespace impl
} // end namespace mlir
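A short sketch, not part of the patch: single-result ops still convert implicitly to the value type, now spelled ValuePtr, so generic helpers keep working. The helper name below is hypothetical.

#include "mlir/IR/OpDefinition.h"

using namespace mlir;

// Replaces all uses of a single-result op with `other`. OneResultOpTy is any
// op carrying the OneResult trait.
template <typename OneResultOpTy>
static void replaceAllUsesOf(OneResultOpTy op, ValuePtr other) {
  ValuePtr result = op;              // implicit conversion via operator ValuePtr()
  result->replaceAllUsesWith(other); // the Value API itself is unchanged
}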
diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h
index 7dd11d089c2..fcadce9ab16 100644
--- a/mlir/include/mlir/IR/OpImplementation.h
+++ b/mlir/include/mlir/IR/OpImplementation.h
@@ -45,7 +45,7 @@ public:
virtual raw_ostream &getStream() const = 0;
/// Print implementations for various things an operation contains.
- virtual void printOperand(Value *value) = 0;
+ virtual void printOperand(ValuePtr value) = 0;
/// Print a comma separated list of operands.
template <typename ContainerType>
@@ -121,7 +121,7 @@ public:
void printFunctionalType(Operation *op) {
auto &os = getStream();
os << "(";
- interleaveComma(op->getNonSuccessorOperands(), os, [&](Value *operand) {
+ interleaveComma(op->getNonSuccessorOperands(), os, [&](ValuePtr operand) {
if (operand)
printType(operand->getType());
else
@@ -150,18 +150,18 @@ private:
};
// Make the implementations convenient to use.
-inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Value &value) {
+inline OpAsmPrinter &operator<<(OpAsmPrinter &p, ValueRef value) {
p.printOperand(&value);
return p;
}
-inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Value *value) {
+inline OpAsmPrinter &operator<<(OpAsmPrinter &p, ValuePtr value) {
return p << *value;
}
-template <typename T,
- typename std::enable_if<std::is_convertible<T &, ValueRange>::value &&
- !std::is_convertible<T &, Value *>::value,
- T>::type * = nullptr>
+template <typename T, typename std::enable_if<
+ std::is_convertible<T &, ValueRange>::value &&
+ !std::is_convertible<T &, ValuePtr>::value,
+ T>::type * = nullptr>
inline OpAsmPrinter &operator<<(OpAsmPrinter &p, const T &values) {
p.printOperands(values);
return p;
@@ -181,8 +181,8 @@ inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Attribute attr) {
// even if it isn't exactly one of them. For example, we want to print
// FunctionType with the Type version above, not have it match this.
template <typename T, typename std::enable_if<
- !std::is_convertible<T &, Value &>::value &&
- !std::is_convertible<T &, Value *>::value &&
+ !std::is_convertible<T &, ValueRef>::value &&
+ !std::is_convertible<T &, ValuePtr>::value &&
!std::is_convertible<T &, Type &>::value &&
!std::is_convertible<T &, Attribute &>::value &&
!std::is_convertible<T &, ValueRange>::value &&
@@ -467,13 +467,13 @@ public:
/// Resolve an operand to an SSA value, emitting an error on failure.
virtual ParseResult resolveOperand(const OperandType &operand, Type type,
- SmallVectorImpl<Value *> &result) = 0;
+ SmallVectorImpl<ValuePtr> &result) = 0;
/// Resolve a list of operands to SSA values, emitting an error on failure, or
/// appending the results to the list on success. This method should be used
/// when all operands have the same type.
ParseResult resolveOperands(ArrayRef<OperandType> operands, Type type,
- SmallVectorImpl<Value *> &result) {
+ SmallVectorImpl<ValuePtr> &result) {
for (auto elt : operands)
if (resolveOperand(elt, type, result))
return failure();
@@ -485,7 +485,7 @@ public:
/// to the list on success.
ParseResult resolveOperands(ArrayRef<OperandType> operands,
ArrayRef<Type> types, llvm::SMLoc loc,
- SmallVectorImpl<Value *> &result) {
+ SmallVectorImpl<ValuePtr> &result) {
if (operands.size() != types.size())
return emitError(loc)
<< operands.size() << " operands present, but expected "
@@ -556,7 +556,7 @@ public:
/// Parse a single operation successor and its operand list.
virtual ParseResult
parseSuccessorAndUseList(Block *&dest,
- SmallVectorImpl<Value *> &operands) = 0;
+ SmallVectorImpl<ValuePtr> &operands) = 0;
//===--------------------------------------------------------------------===//
// Type Parsing
@@ -634,7 +634,7 @@ private:
/// A functor used to set the name of the start of a result group of an
/// operation. See 'getAsmResultNames' below for more details.
-using OpAsmSetValueNameFn = function_ref<void(Value *, StringRef)>;
+using OpAsmSetValueNameFn = function_ref<void(ValuePtr, StringRef)>;
class OpAsmDialectInterface
: public DialectInterface::Base<OpAsmDialectInterface> {
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index 2159d10fd2a..ad0dc600f8f 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -44,7 +44,7 @@ public:
/// Create a new Operation with the specific fields.
static Operation *create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ArrayRef<NamedAttribute> attributes,
ArrayRef<Block *> successors, unsigned numRegions,
bool resizableOperandList);
@@ -53,7 +53,7 @@ public:
/// unnecessarily uniquing a list of attributes.
static Operation *create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
NamedAttributeList attributes,
ArrayRef<Block *> successors, unsigned numRegions,
bool resizableOperandList);
@@ -64,7 +64,7 @@ public:
/// Create a new Operation with the specific fields.
static Operation *
create(Location location, OperationName name, ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands, NamedAttributeList attributes,
+ ArrayRef<ValuePtr> operands, NamedAttributeList attributes,
ArrayRef<Block *> successors = {}, RegionRange regions = {},
bool resizableOperandList = false);
@@ -149,7 +149,7 @@ public:
}
/// Replace any uses of 'from' with 'to' within this operation.
- void replaceUsesOfWith(Value *from, Value *to);
+ void replaceUsesOfWith(ValuePtr from, ValuePtr to);
/// Replace all uses of results of this operation with the provided 'values'.
template <typename ValuesT,
@@ -215,8 +215,8 @@ public:
unsigned getNumOperands() { return getOperandStorage().size(); }
- Value *getOperand(unsigned idx) { return getOpOperand(idx).get(); }
- void setOperand(unsigned idx, Value *value) {
+ ValuePtr getOperand(unsigned idx) { return getOpOperand(idx).get(); }
+ void setOperand(unsigned idx, ValuePtr value) {
return getOpOperand(idx).set(value);
}
@@ -227,7 +227,7 @@ public:
operand_iterator operand_begin() { return getOperands().begin(); }
operand_iterator operand_end() { return getOperands().end(); }
- /// Returns an iterator on the underlying Value's (Value *).
+ /// Returns an iterator on the underlying Values (ValuePtr).
operand_range getOperands() { return operand_range(this); }
/// Erase the operand at position `idx`.
@@ -255,7 +255,7 @@ public:
unsigned getNumResults() { return numResults; }
- Value *getResult(unsigned idx) { return &getOpResult(idx); }
+ ValuePtr getResult(unsigned idx) { return &getOpResult(idx); }
/// Support result iteration.
using result_range = ResultRange;
@@ -399,7 +399,7 @@ public:
operand_range getSuccessorOperands(unsigned index);
- Value *getSuccessorOperand(unsigned succIndex, unsigned opIndex) {
+ ValuePtr getSuccessorOperand(unsigned succIndex, unsigned opIndex) {
assert(!isKnownNonTerminator() && "only terminators may have successors");
assert(opIndex < getNumSuccessorOperands(succIndex));
return getOperand(getSuccessorOperandIndex(succIndex) + opIndex);
@@ -441,9 +441,9 @@ public:
Optional<std::pair<unsigned, unsigned>>
decomposeSuccessorOperandIndex(unsigned operandIndex);
- /// Returns the `BlockArgument*` corresponding to operand `operandIndex` in
+ /// Returns the `BlockArgument` corresponding to operand `operandIndex` in
/// some successor, or None if `operandIndex` isn't a successor operand index.
- Optional<BlockArgument *> getSuccessorBlockArgument(unsigned operandIndex) {
+ Optional<BlockArgumentPtr> getSuccessorBlockArgument(unsigned operandIndex) {
auto decomposed = decomposeSuccessorOperandIndex(operandIndex);
if (!decomposed.hasValue())
return None;
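For illustration only (not part of the patch): Operation's operand accessors behave exactly as before; only the Value * spelling becomes ValuePtr.

#include "mlir/IR/Operation.h"

using namespace mlir;

// Rewrites every operand of `op` that equals `from` to use `to` instead,
// mirroring what Operation::replaceUsesOfWith does internally.
static void replaceOperandsOf(Operation *op, ValuePtr from, ValuePtr to) {
  for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
    if (op->getOperand(i) == from) // getOperand now returns ValuePtr
      op->setOperand(i, to);       // setOperand now takes ValuePtr
}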
diff --git a/mlir/include/mlir/IR/OperationSupport.h b/mlir/include/mlir/IR/OperationSupport.h
index 23ef0ce5937..b7f63218ba5 100644
--- a/mlir/include/mlir/IR/OperationSupport.h
+++ b/mlir/include/mlir/IR/OperationSupport.h
@@ -270,7 +270,7 @@ inline llvm::hash_code hash_value(OperationName arg) {
struct OperationState {
Location location;
OperationName name;
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
/// Types of the results of this operation.
SmallVector<Type, 4> types;
SmallVector<NamedAttribute, 4> attributes;
@@ -534,8 +534,8 @@ private:
/// This class implements iteration on the types of a given range of values.
template <typename ValueIteratorT>
class ValueTypeIterator final
- : public llvm::mapped_iterator<ValueIteratorT, Type (*)(Value *)> {
- static Type unwrap(Value *value) { return value->getType(); }
+ : public llvm::mapped_iterator<ValueIteratorT, Type (*)(ValuePtr)> {
+ static Type unwrap(ValuePtr value) { return value->getType(); }
public:
using reference = Type;
@@ -545,7 +545,8 @@ public:
/// Initializes the type iterator to the specified value iterator.
ValueTypeIterator(ValueIteratorT it)
- : llvm::mapped_iterator<ValueIteratorT, Type (*)(Value *)>(it, &unwrap) {}
+ : llvm::mapped_iterator<ValueIteratorT, Type (*)(ValuePtr)>(it, &unwrap) {
+ }
};
//===----------------------------------------------------------------------===//
@@ -554,7 +555,7 @@ public:
/// This class implements the operand iterators for the Operation class.
class OperandRange final
: public detail::indexed_accessor_range_base<OperandRange, OpOperand *,
- Value *, Value *, Value *> {
+ ValuePtr, ValuePtr, ValuePtr> {
public:
using RangeBaseT::RangeBaseT;
OperandRange(Operation *op);
@@ -569,7 +570,7 @@ private:
return object + index;
}
/// See `detail::indexed_accessor_range_base` for details.
- static Value *dereference_iterator(OpOperand *object, ptrdiff_t index) {
+ static ValuePtr dereference_iterator(OpOperand *object, ptrdiff_t index) {
return object[index].get();
}
@@ -582,8 +583,8 @@ private:
/// This class implements the result iterators for the Operation class.
class ResultRange final
- : public detail::indexed_accessor_range_base<ResultRange, OpResult *,
- Value *, Value *, Value *> {
+ : public detail::indexed_accessor_range_base<ResultRange, OpResultPtr,
+ ValuePtr, ValuePtr, ValuePtr> {
public:
using RangeBaseT::RangeBaseT;
ResultRange(Operation *op);
@@ -594,11 +595,11 @@ public:
private:
/// See `detail::indexed_accessor_range_base` for details.
- static OpResult *offset_base(OpResult *object, ptrdiff_t index) {
+ static OpResultPtr offset_base(OpResultPtr object, ptrdiff_t index) {
return object + index;
}
/// See `detail::indexed_accessor_range_base` for details.
- static Value *dereference_iterator(OpResult *object, ptrdiff_t index) {
+ static ValuePtr dereference_iterator(OpResultPtr object, ptrdiff_t index) {
return &object[index];
}
@@ -610,31 +611,31 @@ private:
// ValueRange
/// This class provides an abstraction over the different types of ranges over
-/// Value*s. In many cases, this prevents the need to explicitly materialize a
+/// Values. In many cases, this prevents the need to explicitly materialize a
/// SmallVector/std::vector. This class should be used in places that are not
/// suitable for a more derived type (e.g. ArrayRef) or a template range
/// parameter.
class ValueRange final
: public detail::indexed_accessor_range_base<
- ValueRange, PointerUnion<Value *const *, OpOperand *, OpResult *>,
- Value *, Value *, Value *> {
+ ValueRange, PointerUnion<ValuePtr const *, OpOperand *, OpResultPtr>,
+ ValuePtr, ValuePtr, ValuePtr> {
public:
using RangeBaseT::RangeBaseT;
template <typename Arg,
typename = typename std::enable_if_t<
- std::is_constructible<ArrayRef<Value *>, Arg>::value &&
- !std::is_convertible<Arg, Value *>::value>>
+ std::is_constructible<ArrayRef<ValuePtr>, Arg>::value &&
+ !std::is_convertible<Arg, ValuePtr>::value>>
ValueRange(Arg &&arg)
- : ValueRange(ArrayRef<Value *>(std::forward<Arg>(arg))) {}
- ValueRange(Value *const &value) : ValueRange(&value, /*count=*/1) {}
- ValueRange(const std::initializer_list<Value *> &values)
- : ValueRange(ArrayRef<Value *>(values)) {}
+ : ValueRange(ArrayRef<ValuePtr>(std::forward<Arg>(arg))) {}
+ ValueRange(ValuePtr const &value) : ValueRange(&value, /*count=*/1) {}
+ ValueRange(const std::initializer_list<ValuePtr> &values)
+ : ValueRange(ArrayRef<ValuePtr>(values)) {}
ValueRange(iterator_range<OperandRange::iterator> values)
: ValueRange(OperandRange(values)) {}
ValueRange(iterator_range<ResultRange::iterator> values)
: ValueRange(ResultRange(values)) {}
- ValueRange(ArrayRef<Value *> values = llvm::None);
+ ValueRange(ArrayRef<ValuePtr> values = llvm::None);
ValueRange(OperandRange values);
ValueRange(ResultRange values);
@@ -645,12 +646,12 @@ public:
private:
/// The type representing the owner of this range. This is either a list of
/// values, operands, or results.
- using OwnerT = PointerUnion<Value *const *, OpOperand *, OpResult *>;
+ using OwnerT = PointerUnion<ValuePtr const *, OpOperand *, OpResultPtr>;
/// See `detail::indexed_accessor_range_base` for details.
static OwnerT offset_base(const OwnerT &owner, ptrdiff_t index);
/// See `detail::indexed_accessor_range_base` for details.
- static Value *dereference_iterator(const OwnerT &owner, ptrdiff_t index);
+ static ValuePtr dereference_iterator(const OwnerT &owner, ptrdiff_t index);
/// Allow access to `offset_base` and `dereference_iterator`.
friend RangeBaseT;
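A hypothetical caller, not part of the patch: ValueRange keeps accepting the usual operand containers, now expressed in terms of ValuePtr, so callers still avoid materializing vectors just to change container type.

#include "mlir/IR/Operation.h"
#include "llvm/ADT/SmallVector.h"

using namespace mlir;

static void useValues(ValueRange values) {
  for (ValuePtr v : values) // iteration still yields the pointer alias
    (void)v->getType();
}

static void callers(Operation *op, ValuePtr single,
                    const SmallVector<ValuePtr, 4> &vec) {
  useValues(op->getOperands()); // OperandRange constructor
  useValues(single);            // single-value constructor
  useValues(vec);               // ArrayRef<ValuePtr> constructor
}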
diff --git a/mlir/include/mlir/IR/TypeUtilities.h b/mlir/include/mlir/IR/TypeUtilities.h
index 2cce4dbb6cf..af22f9c4a9f 100644
--- a/mlir/include/mlir/IR/TypeUtilities.h
+++ b/mlir/include/mlir/IR/TypeUtilities.h
@@ -41,8 +41,8 @@ Type getElementTypeOrSelf(Type type);
/// Return the element type or return the type itself.
Type getElementTypeOrSelf(Attribute attr);
-Type getElementTypeOrSelf(Value *val);
-Type getElementTypeOrSelf(Value &val);
+Type getElementTypeOrSelf(ValuePtr val);
+Type getElementTypeOrSelf(ValueRef val);
/// Get the types within a nested Tuple. A helper for the class method that
/// handles storage concerns, which is tricky to do in tablegen.
@@ -72,7 +72,7 @@ LogicalResult verifyCompatibleShape(Type type1, Type type2);
// An iterator for the element types of an op's operands of shaped types.
class OperandElementTypeIterator final
: public llvm::mapped_iterator<Operation::operand_iterator,
- Type (*)(Value *)> {
+ Type (*)(ValuePtr)> {
public:
using reference = Type;
@@ -81,7 +81,7 @@ public:
explicit OperandElementTypeIterator(Operation::operand_iterator it);
private:
- static Type unwrap(Value *value);
+ static Type unwrap(ValuePtr value);
};
using OperandElementTypeRange = iterator_range<OperandElementTypeIterator>;
@@ -89,7 +89,7 @@ using OperandElementTypeRange = iterator_range<OperandElementTypeIterator>;
// An iterator for the tensor element types of an op's results of shaped types.
class ResultElementTypeIterator final
: public llvm::mapped_iterator<Operation::result_iterator,
- Type (*)(Value *)> {
+ Type (*)(ValuePtr)> {
public:
using reference = Type;
@@ -98,7 +98,7 @@ public:
explicit ResultElementTypeIterator(Operation::result_iterator it);
private:
- static Type unwrap(Value *value);
+ static Type unwrap(ValuePtr value);
};
using ResultElementTypeRange = iterator_range<ResultElementTypeIterator>;
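One possible client, not part of the patch: the element-type helper is unchanged apart from taking the pointer alias.

#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"

using namespace mlir;

// Returns true if `v` is (or contains) f32 elements, e.g. a tensor<...xf32>.
static bool hasF32Elements(ValuePtr v) {
  return getElementTypeOrSelf(v).isF32();
}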
diff --git a/mlir/include/mlir/IR/Value.h b/mlir/include/mlir/IR/Value.h
index 34c74c888cb..11cb8cdcbc7 100644
--- a/mlir/include/mlir/IR/Value.h
+++ b/mlir/include/mlir/IR/Value.h
@@ -28,10 +28,18 @@
namespace mlir {
class Block;
+class BlockArgument;
class Operation;
+class OpResult;
class Region;
class Value;
+/// Using directives that simplify the transition of Value to being value-typed.
+using BlockArgumentPtr = BlockArgument *;
+using OpResultPtr = OpResult *;
+using ValueRef = Value &;
+using ValuePtr = Value *;
+
/// Operands contain a Value.
using OpOperand = IROperandImpl<Value>;
@@ -48,6 +56,15 @@ public:
~Value() {}
+ template <typename U> bool isa() const { return U::classof(this); }
+ template <typename U> U *dyn_cast() const {
+ return isa<U>() ? (U *)this : nullptr;
+ }
+ template <typename U> U *cast() const {
+ assert(isa<U>());
+ return (U *)this;
+ }
+
Kind getKind() const { return typeAndKind.getInt(); }
Type getType() const { return typeAndKind.getPointer(); }
@@ -66,7 +83,7 @@ public:
/// Replace all uses of 'this' value with the new value, updating anything in
/// the IR that uses 'this' to use the other value instead. When this returns
/// there are zero uses of 'this'.
- void replaceAllUsesWith(Value *newValue) {
+ void replaceAllUsesWith(ValuePtr newValue) {
IRObjectWithUseList::replaceAllUsesWith(newValue);
}
@@ -100,7 +117,7 @@ private:
llvm::PointerIntPair<Type, 1, Kind> typeAndKind;
};
-inline raw_ostream &operator<<(raw_ostream &os, Value &value) {
+inline raw_ostream &operator<<(raw_ostream &os, ValueRef value) {
value.print(os);
return os;
}
@@ -160,7 +177,6 @@ private:
/// through bitpacking shenanigans.
Operation *const owner;
};
-
} // namespace mlir
#endif
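Sketch of the new casting helpers, not part of the patch: the added isa/dyn_cast/cast members let code query the derived value kind directly through the alias, with BlockArgumentPtr as the matching result alias.

#include "mlir/IR/Value.h"

using namespace mlir;

// Returns the owning block if `v` is a block argument, null otherwise.
static Block *getOwnerIfBlockArgument(ValuePtr v) {
  if (BlockArgumentPtr arg = v->dyn_cast<BlockArgument>())
    return arg->getOwner();
  return nullptr;
}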
diff --git a/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h b/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h
index 070b3c36e8c..202e86566fc 100644
--- a/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h
+++ b/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h
@@ -163,7 +163,7 @@ public:
}
virtual Operation *getOp() const = 0;
- virtual Value *getValue() const = 0;
+ virtual ValuePtr getValue() const = 0;
static bool classof(const CAGNode *n) {
return n->getKind() >= Kind::Anchor && n->getKind() <= Kind::LastAnchor;
@@ -210,7 +210,7 @@ public:
return n->getKind() == Kind::Anchor || n->getKind() == Kind::OperandAnchor;
}
- Value *getValue() const final { return op->getOperand(operandIdx); }
+ ValuePtr getValue() const final { return op->getOperand(operandIdx); }
void printLabel(raw_ostream &os) const override;
@@ -221,7 +221,7 @@ private:
/// An anchor tied to a specific result.
/// Since a result is already anchored to its defining op, result anchors refer
-/// directly to the underlying Value*.
+/// directly to the underlying Value.
class CAGResultAnchor : public CAGAnchorNode {
public:
CAGResultAnchor(Operation *op, unsigned resultIdx);
@@ -231,12 +231,12 @@ public:
}
Operation *getOp() const final { return resultValue->getDefiningOp(); }
- Value *getValue() const final { return resultValue; }
+ ValuePtr getValue() const final { return resultValue; }
void printLabel(raw_ostream &os) const override;
private:
- Value *resultValue;
+ ValuePtr resultValue;
};
/// Base class for constraint nodes.
diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
index 7adb4aac2e2..7464e2a347d 100644
--- a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
+++ b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
@@ -113,7 +113,7 @@ private:
protected:
// Mappings between original and translated values, used for lookups.
llvm::StringMap<llvm::Function *> functionMapping;
- DenseMap<Value *, llvm::Value *> valueMapping;
+ DenseMap<ValuePtr, llvm::Value *> valueMapping;
DenseMap<Block *, llvm::BasicBlock *> blockMapping;
};
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 814f2202f01..f9f1207c0a0 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -60,7 +60,7 @@ public:
/// remaps an existing signature input.
struct InputMapping {
size_t inputNo, size;
- Value *replacementValue;
+ ValuePtr replacementValue;
};
/// Return the argument types for the new signature.
@@ -90,7 +90,7 @@ public:
/// Remap an input of the original signature to another `replacement`
/// value. This drops the original argument.
- void remapInput(unsigned origInputNo, Value *replacement);
+ void remapInput(unsigned origInputNo, ValuePtr replacement);
private:
/// The remapping information for each of the original arguments.
@@ -143,7 +143,7 @@ public:
/// the conversion has finished.
virtual Operation *materializeConversion(PatternRewriter &rewriter,
Type resultType,
- ArrayRef<Value *> inputs,
+ ArrayRef<ValuePtr> inputs,
Location loc) {
llvm_unreachable("expected 'materializeConversion' to be overridden");
}
@@ -172,7 +172,7 @@ public:
/// ConversionPattern ever needs to replace an operation that does not
/// have successors. This function should not fail. If some specific cases of
/// the operation are not supported, these cases should not be matched.
- virtual void rewrite(Operation *op, ArrayRef<Value *> operands,
+ virtual void rewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("unimplemented rewrite");
}
@@ -187,18 +187,18 @@ public:
/// terminator operation that has successors. This function should not fail
/// the pass. If some specific cases of the operation are not supported,
/// these cases should not be matched.
- virtual void rewrite(Operation *op, ArrayRef<Value *> properOperands,
+ virtual void rewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("unimplemented rewrite for terminators");
}
/// Hook for derived classes to implement combined matching and rewriting.
virtual PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -208,7 +208,7 @@ public:
/// Hook for derived classes to implement combined matching and rewriting.
virtual PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -234,27 +234,27 @@ struct OpConversionPattern : public ConversionPattern {
/// Wrappers around the ConversionPattern methods that pass the derived op
/// type.
- void rewrite(Operation *op, ArrayRef<Value *> operands,
+ void rewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
rewrite(cast<SourceOp>(op), operands, rewriter);
}
- void rewrite(Operation *op, ArrayRef<Value *> properOperands,
+ void rewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const final {
rewrite(cast<SourceOp>(op), properOperands, destinations, operands,
rewriter);
}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const final {
return matchAndRewrite(cast<SourceOp>(op), properOperands, destinations,
operands, rewriter);
}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
return matchAndRewrite(cast<SourceOp>(op), operands, rewriter);
}
@@ -264,22 +264,22 @@ struct OpConversionPattern : public ConversionPattern {
/// Rewrite and Match methods that operate on the SourceOp type. These must be
/// overridden by the derived pattern class.
- virtual void rewrite(SourceOp op, ArrayRef<Value *> operands,
+ virtual void rewrite(SourceOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("must override matchAndRewrite or a rewrite method");
}
- virtual void rewrite(SourceOp op, ArrayRef<Value *> properOperands,
+ virtual void rewrite(SourceOp op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("unimplemented rewrite for terminators");
}
virtual PatternMatchResult
- matchAndRewrite(SourceOp op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(SourceOp op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -288,7 +288,7 @@ struct OpConversionPattern : public ConversionPattern {
}
virtual PatternMatchResult
- matchAndRewrite(SourceOp op, ArrayRef<Value *> operands,
+ matchAndRewrite(SourceOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -330,11 +330,11 @@ public:
TypeConverter::SignatureConversion &conversion);
/// Replace all the uses of the block argument `from` with value `to`.
- void replaceUsesOfBlockArgument(BlockArgument *from, Value *to);
+ void replaceUsesOfBlockArgument(BlockArgumentPtr from, ValuePtr to);
/// Return the converted value that replaces 'key'. Return 'key' if there is
/// no such a converted value.
- Value *getRemappedValue(Value *key);
+ ValuePtr getRemappedValue(ValuePtr key);
//===--------------------------------------------------------------------===//
// PatternRewriter Hooks
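A hypothetical pattern, not part of the patch: a conversion pattern now receives its converted operands as ArrayRef<ValuePtr>. This pass-through pattern simply forwards those operands to replace the matched op.

#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

template <typename SourceOp>
struct PassThroughConversion : public OpConversionPattern<SourceOp> {
  using OpConversionPattern<SourceOp>::OpConversionPattern;

  PatternMatchResult
  matchAndRewrite(SourceOp op, ArrayRef<ValuePtr> operands, // was ArrayRef<Value *>
                  ConversionPatternRewriter &rewriter) const override {
    // Replace the op's results with the already-converted operands.
    rewriter.replaceOp(op.getOperation(), operands);
    return this->matchSuccess();
  }
};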
diff --git a/mlir/include/mlir/Transforms/FoldUtils.h b/mlir/include/mlir/Transforms/FoldUtils.h
index bdf88d3bfb2..65dd1b6df16 100644
--- a/mlir/include/mlir/Transforms/FoldUtils.h
+++ b/mlir/include/mlir/Transforms/FoldUtils.h
@@ -82,7 +82,7 @@ public:
/// and immediately try to fold it. This function populates 'results' with
/// the results after folding the operation.
template <typename OpTy, typename... Args>
- void create(OpBuilder &builder, SmallVectorImpl<Value *> &results,
+ void create(OpBuilder &builder, SmallVectorImpl<ValuePtr> &results,
Location location, Args &&... args) {
Operation *op = builder.create<OpTy>(location, std::forward<Args>(args)...);
if (failed(tryToFold(op, results)))
@@ -94,9 +94,9 @@ public:
/// Overload to create or fold a single result operation.
template <typename OpTy, typename... Args>
typename std::enable_if<OpTy::template hasTrait<OpTrait::OneResult>(),
- Value *>::type
+ ValuePtr>::type
create(OpBuilder &builder, Location location, Args &&... args) {
- SmallVector<Value *, 1> results;
+ SmallVector<ValuePtr, 1> results;
create<OpTy>(builder, results, location, std::forward<Args>(args)...);
return results.front();
}
@@ -107,7 +107,7 @@ public:
OpTy>::type
create(OpBuilder &builder, Location location, Args &&... args) {
auto op = builder.create<OpTy>(location, std::forward<Args>(args)...);
- SmallVector<Value *, 0> unused;
+ SmallVector<ValuePtr, 0> unused;
(void)tryToFold(op.getOperation(), unused);
// Folding cannot remove a zero-result operation, so for convenience we
@@ -126,7 +126,7 @@ private:
/// Tries to perform folding on the given `op`. If successful, populates
/// `results` with the results of the folding.
LogicalResult tryToFold(
- Operation *op, SmallVectorImpl<Value *> &results,
+ Operation *op, SmallVectorImpl<ValuePtr> &results,
function_ref<void(Operation *)> processGeneratedConstants = nullptr);
/// Try to get or create a new constant entry. On success this returns the
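A small sketch, not part of the patch: the single-result create-or-fold helper now hands back a ValuePtr. The ConstantIndexOp include path below reflects this revision and is an assumption.

#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Builders.h"
#include "mlir/Transforms/FoldUtils.h"

using namespace mlir;

// Creates (or folds to an existing) index constant and returns its value.
static ValuePtr getIndexConstant(OperationFolder &folder, OpBuilder &builder,
                                 Location loc, int64_t value) {
  return folder.create<ConstantIndexOp>(builder, loc, value);
}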
diff --git a/mlir/include/mlir/Transforms/InliningUtils.h b/mlir/include/mlir/Transforms/InliningUtils.h
index 590b46a5d12..47c4f48f468 100644
--- a/mlir/include/mlir/Transforms/InliningUtils.h
+++ b/mlir/include/mlir/Transforms/InliningUtils.h
@@ -105,7 +105,7 @@ public:
/// operation). The given 'op' will be removed by the caller, after this
/// function has been called.
virtual void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToReplace) const {
+ ArrayRef<ValuePtr> valuesToReplace) const {
llvm_unreachable(
"must implement handleTerminator in the case of one inlined block");
}
@@ -125,8 +125,8 @@ public:
/// ... = foo.call @foo(%input : i32) -> i16
///
/// NOTE: This hook may be invoked before the 'isLegal' checks above.
- virtual Operation *materializeCallConversion(OpBuilder &builder, Value *input,
- Type resultType,
+ virtual Operation *materializeCallConversion(OpBuilder &builder,
+ ValuePtr input, Type resultType,
Location conversionLoc) const {
return nullptr;
}
@@ -165,7 +165,7 @@ public:
virtual void handleTerminator(Operation *op, Block *newDest) const;
virtual void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const;
+ ArrayRef<ValuePtr> valuesToRepl) const;
};
//===----------------------------------------------------------------------===//
@@ -187,7 +187,7 @@ public:
/// be cloned into the 'inlinePoint' or spliced directly.
LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint, BlockAndValueMapping &mapper,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc = llvm::None,
bool shouldCloneInlinedRegion = true);
@@ -196,8 +196,8 @@ LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
/// in-favor of the region arguments when inlining.
LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
- ArrayRef<Value *> inlinedOperands,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> inlinedOperands,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc = llvm::None,
bool shouldCloneInlinedRegion = true);
diff --git a/mlir/include/mlir/Transforms/LoopLikeInterface.td b/mlir/include/mlir/Transforms/LoopLikeInterface.td
index 5c324b79f67..583cfe26d87 100644
--- a/mlir/include/mlir/Transforms/LoopLikeInterface.td
+++ b/mlir/include/mlir/Transforms/LoopLikeInterface.td
@@ -38,7 +38,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
explicit capture of dependencies, an implementation could check whether
the value corresponds to a captured dependency.
}],
- "bool", "isDefinedOutsideOfLoop", (ins "Value *":$value)
+ "bool", "isDefinedOutsideOfLoop", (ins "ValuePtr ":$value)
>,
InterfaceMethod<[{
Returns the region that makes up the body of the loop and should be
diff --git a/mlir/include/mlir/Transforms/LoopUtils.h b/mlir/include/mlir/Transforms/LoopUtils.h
index 5ca3f7f6510..37434ea2ea8 100644
--- a/mlir/include/mlir/Transforms/LoopUtils.h
+++ b/mlir/include/mlir/Transforms/LoopUtils.h
@@ -85,7 +85,7 @@ void promoteSingleIterationLoops(FuncOp f);
/// expression.
void getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
- SmallVectorImpl<Value *> *operands,
+ SmallVectorImpl<ValuePtr> *operands,
OpBuilder &builder);
/// Skew the operations in the body of a 'affine.for' operation with the
@@ -140,7 +140,7 @@ SmallVector<SmallVector<AffineForOp, 8>, 8> tile(ArrayRef<AffineForOp> forOps,
ArrayRef<uint64_t> sizes,
ArrayRef<AffineForOp> targets);
SmallVector<Loops, 8> tile(ArrayRef<loop::ForOp> forOps,
- ArrayRef<Value *> sizes,
+ ArrayRef<ValuePtr> sizes,
ArrayRef<loop::ForOp> targets);
/// Performs tiling (with interchange) by strip-mining the `forOps` by `sizes`
@@ -149,7 +149,7 @@ SmallVector<Loops, 8> tile(ArrayRef<loop::ForOp> forOps,
/// `target`.
SmallVector<AffineForOp, 8> tile(ArrayRef<AffineForOp> forOps,
ArrayRef<uint64_t> sizes, AffineForOp target);
-Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
+Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<ValuePtr> sizes,
loop::ForOp target);
/// Tile a nest of loop::ForOp loops rooted at `rootForOp` with the given
@@ -157,7 +157,7 @@ Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
/// runtime. If more sizes than loops are provided, discard the trailing values
/// in sizes. Assumes the loop nest is permutable.
/// Returns the newly created intra-tile loops.
-Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value *> sizes);
+Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<ValuePtr> sizes);
/// Explicit copy / DMA generation options for mlir::affineDataCopyGenerate.
struct AffineCopyOptions {
@@ -229,8 +229,8 @@ void coalesceLoops(MutableArrayRef<loop::ForOp> loops);
/// ...
/// }
/// ```
-void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<Value *> processorId,
- ArrayRef<Value *> numProcessors);
+void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<ValuePtr> processorId,
+ ArrayRef<ValuePtr> numProcessors);
} // end namespace mlir
#endif // MLIR_TRANSFORMS_LOOP_UTILS_H
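A brief sketch, not part of the patch: dynamic tile sizes for loop::ForOp tiling are now passed as ValuePtr values. The LoopOps include path is an assumption for this revision.

#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Transforms/LoopUtils.h"

using namespace mlir;

// Tiles the perfectly nested band rooted at `root` by SSA tile sizes.
// Assumes, as the API requires, that the nest is permutable.
static Loops tileBy(loop::ForOp root, ArrayRef<ValuePtr> tileSizes) {
  return tilePerfectlyNested(root, tileSizes);
}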
diff --git a/mlir/include/mlir/Transforms/RegionUtils.h b/mlir/include/mlir/Transforms/RegionUtils.h
index 48080b26c2c..63236d6a5a0 100644
--- a/mlir/include/mlir/Transforms/RegionUtils.h
+++ b/mlir/include/mlir/Transforms/RegionUtils.h
@@ -30,14 +30,14 @@ namespace mlir {
/// of `limit`.
template <typename Range>
bool areValuesDefinedAbove(Range values, Region &limit) {
- for (Value *v : values)
+ for (ValuePtr v : values)
if (!v->getParentRegion()->isProperAncestor(&limit))
return false;
return true;
}
/// Replace all uses of `orig` within the given region with `replacement`.
-void replaceAllUsesInRegionWith(Value *orig, Value *replacement,
+void replaceAllUsesInRegionWith(ValuePtr orig, ValuePtr replacement,
Region &region);
/// Calls `callback` for each use of a value within `region` or its descendants
@@ -53,12 +53,12 @@ void visitUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
/// Fill `values` with a list of values defined at the ancestors of the `limit`
/// region and used within `region` or its descendants.
void getUsedValuesDefinedAbove(Region &region, Region &limit,
- llvm::SetVector<Value *> &values);
+ llvm::SetVector<ValuePtr> &values);
/// Fill `values` with a list of values used within any of the regions provided
/// but defined in one of the ancestors.
void getUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
- llvm::SetVector<Value *> &values);
+ llvm::SetVector<ValuePtr> &values);
/// Run a set of structural simplifications over the given regions. This
/// includes transformations like unreachable block elimination, dead argument
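An illustrative helper, not part of the patch: collecting values captured from above a region now fills a SetVector of ValuePtr.

#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/SetVector.h"

using namespace mlir;

// Returns true if `region` uses any value defined outside of it.
static bool capturesValuesFromAbove(Region &region) {
  llvm::SetVector<ValuePtr> captured;
  getUsedValuesDefinedAbove(region, region, captured);
  return !captured.empty();
}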
diff --git a/mlir/include/mlir/Transforms/Utils.h b/mlir/include/mlir/Transforms/Utils.h
index c682b48f331..02c368ec496 100644
--- a/mlir/include/mlir/Transforms/Utils.h
+++ b/mlir/include/mlir/Transforms/Utils.h
@@ -66,22 +66,22 @@ class OpBuilder;
// extra operands, note that 'indexRemap' would just be applied to existing
// indices (%i, %j).
// TODO(bondhugula): allow extraIndices to be added at any position.
-LogicalResult replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
- ArrayRef<Value *> extraIndices = {},
+LogicalResult replaceAllMemRefUsesWith(ValuePtr oldMemRef, ValuePtr newMemRef,
+ ArrayRef<ValuePtr> extraIndices = {},
AffineMap indexRemap = AffineMap(),
- ArrayRef<Value *> extraOperands = {},
- ArrayRef<Value *> symbolOperands = {},
+ ArrayRef<ValuePtr> extraOperands = {},
+ ArrayRef<ValuePtr> symbolOperands = {},
Operation *domInstFilter = nullptr,
Operation *postDomInstFilter = nullptr);
/// Performs the same replacement as the other version above but only for the
/// dereferencing uses of `oldMemRef` in `op`.
-LogicalResult replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
+LogicalResult replaceAllMemRefUsesWith(ValuePtr oldMemRef, ValuePtr newMemRef,
Operation *op,
- ArrayRef<Value *> extraIndices = {},
+ ArrayRef<ValuePtr> extraIndices = {},
AffineMap indexRemap = AffineMap(),
- ArrayRef<Value *> extraOperands = {},
- ArrayRef<Value *> symbolOperands = {});
+ ArrayRef<ValuePtr> extraOperands = {},
+ ArrayRef<ValuePtr> symbolOperands = {});
/// Rewrites the memref defined by this alloc op to have an identity layout map
/// and updates all its indexing uses. Returns failure if any of its uses
@@ -96,9 +96,9 @@ LogicalResult normalizeMemRef(AllocOp op);
/// The final results of the composed AffineApplyOp are returned in output
/// parameter 'results'. Returns the affine apply op created.
Operation *createComposedAffineApplyOp(OpBuilder &builder, Location loc,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ArrayRef<Operation *> affineApplyOps,
- SmallVectorImpl<Value *> *results);
+ SmallVectorImpl<ValuePtr> *results);
/// Given an operation, inserts one or more single result affine apply
/// operations, results of which are exclusively used by this operation.
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index 97868a56524..60b2f17292b 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -48,15 +48,15 @@ using llvm::dbgs;
// TODO(andydavis) Add a method to AffineApplyOp which forward substitutes
// the AffineApplyOp into any user AffineApplyOps.
void mlir::getReachableAffineApplyOps(
- ArrayRef<Value *> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
+ ArrayRef<ValuePtr> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
struct State {
// The ssa value for this node in the DFS traversal.
- Value *value;
+ ValuePtr value;
// The operand index of 'value' to explore next during DFS traversal.
unsigned operandIndex;
};
SmallVector<State, 4> worklist;
- for (auto *operand : operands) {
+ for (auto operand : operands) {
worklist.push_back({operand, 0});
}
@@ -77,7 +77,7 @@ void mlir::getReachableAffineApplyOps(
if (state.operandIndex < opInst->getNumOperands()) {
// Visit: Add next 'affineApplyOp' operand to worklist.
// Get next operand to visit at 'operandIndex'.
- auto *nextOperand = opInst->getOperand(state.operandIndex);
+ auto nextOperand = opInst->getOperand(state.operandIndex);
// Increment 'operandIndex' in 'state'.
++state.operandIndex;
// Add 'nextOperand' to worklist.
@@ -99,7 +99,7 @@ void mlir::getReachableAffineApplyOps(
// setExprStride(ArrayRef<int64_t> expr, int64_t stride)
LogicalResult mlir::getIndexSet(MutableArrayRef<AffineForOp> forOps,
FlatAffineConstraints *domain) {
- SmallVector<Value *, 4> indices;
+ SmallVector<ValuePtr, 4> indices;
extractForInductionVars(forOps, &indices);
// Reset while associated Values in 'indices' to the domain.
domain->reset(forOps.size(), /*numSymbols=*/0, /*numLocals=*/0, indices);
@@ -146,25 +146,25 @@ static LogicalResult getInstIndexSet(Operation *op,
// of maps to check. So getSrcDimOrSymPos would be "getPos(value, {0, 2})".
class ValuePositionMap {
public:
- void addSrcValue(Value *value) {
+ void addSrcValue(ValuePtr value) {
if (addValueAt(value, &srcDimPosMap, numSrcDims))
++numSrcDims;
}
- void addDstValue(Value *value) {
+ void addDstValue(ValuePtr value) {
if (addValueAt(value, &dstDimPosMap, numDstDims))
++numDstDims;
}
- void addSymbolValue(Value *value) {
+ void addSymbolValue(ValuePtr value) {
if (addValueAt(value, &symbolPosMap, numSymbols))
++numSymbols;
}
- unsigned getSrcDimOrSymPos(Value *value) const {
+ unsigned getSrcDimOrSymPos(ValuePtr value) const {
return getDimOrSymPos(value, srcDimPosMap, 0);
}
- unsigned getDstDimOrSymPos(Value *value) const {
+ unsigned getDstDimOrSymPos(ValuePtr value) const {
return getDimOrSymPos(value, dstDimPosMap, numSrcDims);
}
- unsigned getSymPos(Value *value) const {
+ unsigned getSymPos(ValuePtr value) const {
auto it = symbolPosMap.find(value);
assert(it != symbolPosMap.end());
return numSrcDims + numDstDims + it->second;
@@ -176,7 +176,7 @@ public:
unsigned getNumSymbols() const { return numSymbols; }
private:
- bool addValueAt(Value *value, DenseMap<Value *, unsigned> *posMap,
+ bool addValueAt(ValuePtr value, DenseMap<ValuePtr, unsigned> *posMap,
unsigned position) {
auto it = posMap->find(value);
if (it == posMap->end()) {
@@ -185,8 +185,8 @@ private:
}
return false;
}
- unsigned getDimOrSymPos(Value *value,
- const DenseMap<Value *, unsigned> &dimPosMap,
+ unsigned getDimOrSymPos(ValuePtr value,
+ const DenseMap<ValuePtr, unsigned> &dimPosMap,
unsigned dimPosOffset) const {
auto it = dimPosMap.find(value);
if (it != dimPosMap.end()) {
@@ -200,9 +200,9 @@ private:
unsigned numSrcDims = 0;
unsigned numDstDims = 0;
unsigned numSymbols = 0;
- DenseMap<Value *, unsigned> srcDimPosMap;
- DenseMap<Value *, unsigned> dstDimPosMap;
- DenseMap<Value *, unsigned> symbolPosMap;
+ DenseMap<ValuePtr, unsigned> srcDimPosMap;
+ DenseMap<ValuePtr, unsigned> dstDimPosMap;
+ DenseMap<ValuePtr, unsigned> symbolPosMap;
};
// Builds a map from Value to identifier position in a new merged identifier
@@ -219,9 +219,9 @@ static void buildDimAndSymbolPositionMaps(
const FlatAffineConstraints &dstDomain, const AffineValueMap &srcAccessMap,
const AffineValueMap &dstAccessMap, ValuePositionMap *valuePosMap,
FlatAffineConstraints *dependenceConstraints) {
- auto updateValuePosMap = [&](ArrayRef<Value *> values, bool isSrc) {
+ auto updateValuePosMap = [&](ArrayRef<ValuePtr> values, bool isSrc) {
for (unsigned i = 0, e = values.size(); i < e; ++i) {
- auto *value = values[i];
+ auto value = values[i];
if (!isForInductionVar(values[i])) {
assert(isValidSymbol(values[i]) &&
"access operand has to be either a loop IV or a symbol");
@@ -234,7 +234,7 @@ static void buildDimAndSymbolPositionMaps(
}
};
- SmallVector<Value *, 4> srcValues, destValues;
+ SmallVector<ValuePtr, 4> srcValues, destValues;
srcDomain.getIdValues(0, srcDomain.getNumDimAndSymbolIds(), &srcValues);
dstDomain.getIdValues(0, dstDomain.getNumDimAndSymbolIds(), &destValues);
// Update value position map with identifiers from src iteration domain.
@@ -273,7 +273,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain,
numLocals);
// Set values corresponding to dependence constraint identifiers.
- SmallVector<Value *, 4> srcLoopIVs, dstLoopIVs;
+ SmallVector<ValuePtr, 4> srcLoopIVs, dstLoopIVs;
srcDomain.getIdValues(0, srcDomain.getNumDimIds(), &srcLoopIVs);
dstDomain.getIdValues(0, dstDomain.getNumDimIds(), &dstLoopIVs);
@@ -282,8 +282,8 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain,
srcLoopIVs.size(), srcLoopIVs.size() + dstLoopIVs.size(), dstLoopIVs);
// Set values for the symbolic identifier dimensions.
- auto setSymbolIds = [&](ArrayRef<Value *> values) {
- for (auto *value : values) {
+ auto setSymbolIds = [&](ArrayRef<ValuePtr> values) {
+ for (auto value : values) {
if (!isForInductionVar(value)) {
assert(isValidSymbol(value) && "expected symbol");
dependenceConstraints->setIdValue(valuePosMap.getSymPos(value), value);
@@ -294,7 +294,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain,
setSymbolIds(srcAccessMap.getOperands());
setSymbolIds(dstAccessMap.getOperands());
- SmallVector<Value *, 8> srcSymbolValues, dstSymbolValues;
+ SmallVector<ValuePtr, 8> srcSymbolValues, dstSymbolValues;
srcDomain.getIdValues(srcDomain.getNumDimIds(),
srcDomain.getNumDimAndSymbolIds(), &srcSymbolValues);
dstDomain.getIdValues(dstDomain.getNumDimIds(),
@@ -398,10 +398,10 @@ addMemRefAccessConstraints(const AffineValueMap &srcAccessMap,
unsigned numResults = srcMap.getNumResults();
unsigned srcNumIds = srcMap.getNumDims() + srcMap.getNumSymbols();
- ArrayRef<Value *> srcOperands = srcAccessMap.getOperands();
+ ArrayRef<ValuePtr> srcOperands = srcAccessMap.getOperands();
unsigned dstNumIds = dstMap.getNumDims() + dstMap.getNumSymbols();
- ArrayRef<Value *> dstOperands = dstAccessMap.getOperands();
+ ArrayRef<ValuePtr> dstOperands = dstAccessMap.getOperands();
std::vector<SmallVector<int64_t, 8>> srcFlatExprs;
std::vector<SmallVector<int64_t, 8>> destFlatExprs;
@@ -457,11 +457,11 @@ addMemRefAccessConstraints(const AffineValueMap &srcAccessMap,
}
// Add equality constraints for any operands that are defined by constant ops.
- auto addEqForConstOperands = [&](ArrayRef<Value *> operands) {
+ auto addEqForConstOperands = [&](ArrayRef<ValuePtr> operands) {
for (unsigned i = 0, e = operands.size(); i < e; ++i) {
if (isForInductionVar(operands[i]))
continue;
- auto *symbol = operands[i];
+ auto symbol = operands[i];
assert(isValidSymbol(symbol));
// Check if the symbol is a constant.
if (auto cOp = dyn_cast_or_null<ConstantIndexOp>(symbol->getDefiningOp()))
@@ -553,7 +553,7 @@ static Block *getCommonBlock(const MemRefAccess &srcAccess,
}
return block;
}
- auto *commonForValue = srcDomain.getIdValue(numCommonLoops - 1);
+ auto commonForValue = srcDomain.getIdValue(numCommonLoops - 1);
auto forOp = getForInductionVarOwner(commonForValue);
assert(forOp && "commonForValue was not an induction variable");
return forOp.getBody();
@@ -675,7 +675,7 @@ void MemRefAccess::getAccessMap(AffineValueMap *accessMap) const {
map = loadOp.getAffineMap();
else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst))
map = storeOp.getAffineMap();
- SmallVector<Value *, 8> operands(indices.begin(), indices.end());
+ SmallVector<ValuePtr, 8> operands(indices.begin(), indices.end());
fullyComposeAffineMapAndOperands(&map, &operands);
map = simplifyAffineMap(map);
canonicalizeMapAndOperands(&map, &operands);
diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index d678355880e..21c2830c016 100644
--- a/mlir/lib/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -204,8 +204,8 @@ MutableIntegerSet::MutableIntegerSet(unsigned numDims, unsigned numSymbols,
// AffineValueMap.
//===----------------------------------------------------------------------===//
-AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<Value *> operands,
- ArrayRef<Value *> results)
+AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<ValuePtr> operands,
+ ArrayRef<ValuePtr> results)
: map(map), operands(operands.begin(), operands.end()),
results(results.begin(), results.end()) {}
@@ -219,8 +219,8 @@ AffineValueMap::AffineValueMap(AffineBound bound)
: map(bound.getMap()),
operands(bound.operand_begin(), bound.operand_end()) {}
-void AffineValueMap::reset(AffineMap map, ArrayRef<Value *> operands,
- ArrayRef<Value *> results) {
+void AffineValueMap::reset(AffineMap map, ArrayRef<ValuePtr> operands,
+ ArrayRef<ValuePtr> results) {
this->map.reset(map);
this->operands.assign(operands.begin(), operands.end());
this->results.assign(results.begin(), results.end());
@@ -232,14 +232,14 @@ void AffineValueMap::difference(const AffineValueMap &a,
// Fully compose A's map + operands.
auto aMap = a.getAffineMap();
- SmallVector<Value *, 4> aOperands(a.getOperands().begin(),
- a.getOperands().end());
+ SmallVector<ValuePtr, 4> aOperands(a.getOperands().begin(),
+ a.getOperands().end());
fullyComposeAffineMapAndOperands(&aMap, &aOperands);
// Use the affine apply normalizer to get B's map into A's coordinate space.
AffineApplyNormalizer normalizer(aMap, aOperands);
- SmallVector<Value *, 4> bOperands(b.getOperands().begin(),
- b.getOperands().end());
+ SmallVector<ValuePtr, 4> bOperands(b.getOperands().begin(),
+ b.getOperands().end());
auto bMap = b.getAffineMap();
normalizer.normalize(&bMap, &bOperands);
@@ -263,7 +263,7 @@ void AffineValueMap::difference(const AffineValueMap &a,
// Returns true and sets 'indexOfMatch' if 'valueToMatch' is found in
// 'valuesToSearch' beginning at 'indexStart'. Returns false otherwise.
-static bool findIndex(Value *valueToMatch, ArrayRef<Value *> valuesToSearch,
+static bool findIndex(ValuePtr valueToMatch, ArrayRef<ValuePtr> valuesToSearch,
unsigned indexStart, unsigned *indexOfMatch) {
unsigned size = valuesToSearch.size();
for (unsigned i = indexStart; i < size; ++i) {
@@ -281,7 +281,7 @@ inline bool AffineValueMap::isMultipleOf(unsigned idx, int64_t factor) const {
/// This method uses the invariant that operands are always positionally aligned
/// with the AffineDimExpr in the underlying AffineMap.
-bool AffineValueMap::isFunctionOf(unsigned idx, Value *value) const {
+bool AffineValueMap::isFunctionOf(unsigned idx, ValuePtr value) const {
unsigned index;
if (!findIndex(value, operands, /*indexStart=*/0, &index)) {
return false;
@@ -292,12 +292,12 @@ bool AffineValueMap::isFunctionOf(unsigned idx, Value *value) const {
return expr.isFunctionOfDim(index);
}
-Value *AffineValueMap::getOperand(unsigned i) const {
- return static_cast<Value *>(operands[i]);
+ValuePtr AffineValueMap::getOperand(unsigned i) const {
+ return static_cast<ValuePtr>(operands[i]);
}
-ArrayRef<Value *> AffineValueMap::getOperands() const {
- return ArrayRef<Value *>(operands);
+ArrayRef<ValuePtr> AffineValueMap::getOperands() const {
+ return ArrayRef<ValuePtr>(operands);
}
AffineMap AffineValueMap::getAffineMap() const { return map.getAffineMap(); }
@@ -378,7 +378,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities,
unsigned newNumReservedCols,
unsigned newNumDims, unsigned newNumSymbols,
unsigned newNumLocals,
- ArrayRef<Value *> idArgs) {
+ ArrayRef<ValuePtr> idArgs) {
assert(newNumReservedCols >= newNumDims + newNumSymbols + newNumLocals + 1 &&
"minimum 1 column");
numReservedCols = newNumReservedCols;
@@ -401,7 +401,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities,
void FlatAffineConstraints::reset(unsigned newNumDims, unsigned newNumSymbols,
unsigned newNumLocals,
- ArrayRef<Value *> idArgs) {
+ ArrayRef<ValuePtr> idArgs) {
reset(0, 0, newNumDims + newNumSymbols + newNumLocals + 1, newNumDims,
newNumSymbols, newNumLocals, idArgs);
}
@@ -428,17 +428,17 @@ void FlatAffineConstraints::addLocalId(unsigned pos) {
addId(IdKind::Local, pos);
}
-void FlatAffineConstraints::addDimId(unsigned pos, Value *id) {
+void FlatAffineConstraints::addDimId(unsigned pos, ValuePtr id) {
addId(IdKind::Dimension, pos, id);
}
-void FlatAffineConstraints::addSymbolId(unsigned pos, Value *id) {
+void FlatAffineConstraints::addSymbolId(unsigned pos, ValuePtr id) {
addId(IdKind::Symbol, pos, id);
}
/// Adds a dimensional identifier. The added column is initialized to
/// zero.
-void FlatAffineConstraints::addId(IdKind kind, unsigned pos, Value *id) {
+void FlatAffineConstraints::addId(IdKind kind, unsigned pos, ValuePtr id) {
if (kind == IdKind::Dimension) {
assert(pos <= getNumDimIds());
} else if (kind == IdKind::Symbol) {
@@ -527,7 +527,7 @@ bool FlatAffineConstraints::areIdsAlignedWithOther(
/// Checks if the SSA values associated with `cst''s identifiers are unique.
static bool LLVM_ATTRIBUTE_UNUSED
areIdsUnique(const FlatAffineConstraints &cst) {
- SmallPtrSet<Value *, 8> uniqueIds;
+ SmallPtrSet<ValuePtr, 8> uniqueIds;
for (auto id : cst.getIds()) {
if (id.hasValue() && !uniqueIds.insert(id.getValue()).second)
return false;
@@ -571,11 +571,11 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A,
assert(std::all_of(A->getIds().begin() + offset,
A->getIds().begin() + A->getNumDimAndSymbolIds(),
- [](Optional<Value *> id) { return id.hasValue(); }));
+ [](Optional<ValuePtr> id) { return id.hasValue(); }));
assert(std::all_of(B->getIds().begin() + offset,
B->getIds().begin() + B->getNumDimAndSymbolIds(),
- [](Optional<Value *> id) { return id.hasValue(); }));
+ [](Optional<ValuePtr> id) { return id.hasValue(); }));
// Place local id's of A after local id's of B.
for (unsigned l = 0, e = A->getNumLocalIds(); l < e; l++) {
@@ -586,13 +586,13 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A,
A->addLocalId(A->getNumLocalIds());
}
- SmallVector<Value *, 4> aDimValues, aSymValues;
+ SmallVector<ValuePtr, 4> aDimValues, aSymValues;
A->getIdValues(offset, A->getNumDimIds(), &aDimValues);
A->getIdValues(A->getNumDimIds(), A->getNumDimAndSymbolIds(), &aSymValues);
{
// Merge dims from A into B.
unsigned d = offset;
- for (auto *aDimValue : aDimValues) {
+ for (auto aDimValue : aDimValues) {
unsigned loc;
if (B->findId(*aDimValue, &loc)) {
assert(loc >= offset && "A's dim appears in B's aligned range");
@@ -615,7 +615,7 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A,
{
// Merge symbols: merge A's symbols into B first.
unsigned s = B->getNumDimIds();
- for (auto *aSymValue : aSymValues) {
+ for (auto aSymValue : aSymValues) {
unsigned loc;
if (B->findId(*aSymValue, &loc)) {
assert(loc >= B->getNumDimIds() && loc < B->getNumDimAndSymbolIds() &&
@@ -785,7 +785,7 @@ LogicalResult FlatAffineConstraints::composeMatchingMap(AffineMap other) {
}
// Turn a dimension into a symbol.
-static void turnDimIntoSymbol(FlatAffineConstraints *cst, Value &id) {
+static void turnDimIntoSymbol(FlatAffineConstraints *cst, ValueRef id) {
unsigned pos;
if (cst->findId(id, &pos) && pos < cst->getNumDimIds()) {
swapId(cst, pos, cst->getNumDimIds() - 1);
@@ -794,7 +794,7 @@ static void turnDimIntoSymbol(FlatAffineConstraints *cst, Value &id) {
}
// Turn a symbol into a dimension.
-static void turnSymbolIntoDim(FlatAffineConstraints *cst, Value &id) {
+static void turnSymbolIntoDim(FlatAffineConstraints *cst, ValueRef id) {
unsigned pos;
if (cst->findId(id, &pos) && pos >= cst->getNumDimIds() &&
pos < cst->getNumDimAndSymbolIds()) {
@@ -806,18 +806,18 @@ static void turnSymbolIntoDim(FlatAffineConstraints *cst, Value &id) {
// Changes all symbol identifiers which are loop IVs to dim identifiers.
void FlatAffineConstraints::convertLoopIVSymbolsToDims() {
// Gather all symbols which are loop IVs.
- SmallVector<Value *, 4> loopIVs;
+ SmallVector<ValuePtr, 4> loopIVs;
for (unsigned i = getNumDimIds(), e = getNumDimAndSymbolIds(); i < e; i++) {
if (ids[i].hasValue() && getForInductionVarOwner(ids[i].getValue()))
loopIVs.push_back(ids[i].getValue());
}
// Turn each symbol in 'loopIVs' into a dim identifier.
- for (auto *iv : loopIVs) {
+ for (auto iv : loopIVs) {
turnSymbolIntoDim(this, *iv);
}
}
-void FlatAffineConstraints::addInductionVarOrTerminalSymbol(Value *id) {
+void FlatAffineConstraints::addInductionVarOrTerminalSymbol(ValuePtr id) {
if (containsId(*id))
return;
@@ -876,8 +876,8 @@ LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) {
addConstantLowerBound(pos, forOp.getConstantLowerBound());
} else {
// Non-constant lower bound case.
- SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands().begin(),
- forOp.getLowerBoundOperands().end());
+ SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands().begin(),
+ forOp.getLowerBoundOperands().end());
if (failed(addLowerOrUpperBound(pos, forOp.getLowerBoundMap(), lbOperands,
/*eq=*/false, /*lower=*/true)))
return failure();
@@ -888,8 +888,8 @@ LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) {
return success();
}
// Non-constant upper bound case.
- SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands().begin(),
- forOp.getUpperBoundOperands().end());
+ SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands().begin(),
+ forOp.getUpperBoundOperands().end());
return addLowerOrUpperBound(pos, forOp.getUpperBoundMap(), ubOperands,
/*eq=*/false, /*lower=*/false);
}
@@ -1757,7 +1757,7 @@ void FlatAffineConstraints::getSliceBounds(unsigned offset, unsigned num,
LogicalResult
FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
- ArrayRef<Value *> boundOperands,
+ ArrayRef<ValuePtr> boundOperands,
bool eq, bool lower) {
assert(pos < getNumDimAndSymbolIds() && "invalid position");
// Equality follows the logic of lower bound except that we add an equality
@@ -1769,11 +1769,11 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
// Fully compose map and operands; canonicalize and simplify so that we
// transitively get to terminal symbols or loop IVs.
auto map = boundMap;
- SmallVector<Value *, 4> operands(boundOperands.begin(), boundOperands.end());
+ SmallVector<ValuePtr, 4> operands(boundOperands.begin(), boundOperands.end());
fullyComposeAffineMapAndOperands(&map, &operands);
map = simplifyAffineMap(map);
canonicalizeMapAndOperands(&map, &operands);
- for (auto *operand : operands)
+ for (auto operand : operands)
addInductionVarOrTerminalSymbol(operand);
FlatAffineConstraints localVarCst;
@@ -1787,7 +1787,7 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
if (localVarCst.getNumLocalIds() > 0) {
// Set values for localVarCst.
localVarCst.setIdValues(0, localVarCst.getNumDimAndSymbolIds(), operands);
- for (auto *operand : operands) {
+ for (auto operand : operands) {
unsigned pos;
if (findId(*operand, &pos)) {
if (pos >= getNumDimIds() && pos < getNumDimAndSymbolIds()) {
@@ -1807,7 +1807,7 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
// this here since the constraint system changes after a bound is added.
SmallVector<unsigned, 8> positions;
unsigned numOperands = operands.size();
- for (auto *operand : operands) {
+ for (auto operand : operands) {
unsigned pos;
if (!findId(*operand, &pos))
assert(0 && "expected to be found");
@@ -1848,8 +1848,8 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
// Returns failure for unimplemented cases such as semi-affine expressions or
// expressions with mod/floordiv.
LogicalResult FlatAffineConstraints::addSliceBounds(
- ArrayRef<Value *> values, ArrayRef<AffineMap> lbMaps,
- ArrayRef<AffineMap> ubMaps, ArrayRef<Value *> operands) {
+ ArrayRef<ValuePtr> values, ArrayRef<AffineMap> lbMaps,
+ ArrayRef<AffineMap> ubMaps, ArrayRef<ValuePtr> operands) {
assert(values.size() == lbMaps.size());
assert(lbMaps.size() == ubMaps.size());
@@ -1971,7 +1971,7 @@ void FlatAffineConstraints::addLocalFloorDiv(ArrayRef<int64_t> dividend,
addInequality(bound);
}
-bool FlatAffineConstraints::findId(Value &id, unsigned *pos) const {
+bool FlatAffineConstraints::findId(ValueRef id, unsigned *pos) const {
unsigned i = 0;
for (const auto &mayBeId : ids) {
if (mayBeId.hasValue() && mayBeId.getValue() == &id) {
@@ -1983,8 +1983,8 @@ bool FlatAffineConstraints::findId(Value &id, unsigned *pos) const {
return false;
}
-bool FlatAffineConstraints::containsId(Value &id) const {
- return llvm::any_of(ids, [&](const Optional<Value *> &mayBeId) {
+bool FlatAffineConstraints::containsId(ValueRef id) const {
+ return llvm::any_of(ids, [&](const Optional<ValuePtr> &mayBeId) {
return mayBeId.hasValue() && mayBeId.getValue() == &id;
});
}
@@ -2008,7 +2008,7 @@ void FlatAffineConstraints::setIdToConstant(unsigned pos, int64_t val) {
/// Sets the specified identifier to a constant value; asserts if the id is not
/// found.
-void FlatAffineConstraints::setIdToConstant(Value &id, int64_t val) {
+void FlatAffineConstraints::setIdToConstant(ValueRef id, int64_t val) {
unsigned pos;
if (!findId(id, &pos))
// This is a pre-condition for this method.
@@ -2573,7 +2573,7 @@ void FlatAffineConstraints::FourierMotzkinEliminate(
unsigned newNumDims = dimsSymbols.first;
unsigned newNumSymbols = dimsSymbols.second;
- SmallVector<Optional<Value *>, 8> newIds;
+ SmallVector<Optional<ValuePtr>, 8> newIds;
newIds.reserve(numIds - 1);
newIds.append(ids.begin(), ids.begin() + pos);
newIds.append(ids.begin() + pos + 1, ids.end());
@@ -2709,7 +2709,7 @@ void FlatAffineConstraints::projectOut(unsigned pos, unsigned num) {
normalizeConstraintsByGCD();
}
-void FlatAffineConstraints::projectOut(Value *id) {
+void FlatAffineConstraints::projectOut(ValuePtr id) {
unsigned pos;
bool ret = findId(*id, &pos);
assert(ret);
diff --git a/mlir/lib/Analysis/CallGraph.cpp b/mlir/lib/Analysis/CallGraph.cpp
index 93017ca3b57..6ec7c059526 100644
--- a/mlir/lib/Analysis/CallGraph.cpp
+++ b/mlir/lib/Analysis/CallGraph.cpp
@@ -188,7 +188,7 @@ CallGraphNode *CallGraph::resolveCallable(CallInterfaceCallable callable,
callee = SymbolTable::lookupNearestSymbolFrom(from,
symbolRef.getRootReference());
else
- callee = callable.get<Value *>()->getDefiningOp();
+ callee = callable.get<ValuePtr>()->getDefiningOp();
// If the callee is non-null and is a valid callable object, try to get the
// called region from it.
diff --git a/mlir/lib/Analysis/Dominance.cpp b/mlir/lib/Analysis/Dominance.cpp
index c422578320f..532972b771b 100644
--- a/mlir/lib/Analysis/Dominance.cpp
+++ b/mlir/lib/Analysis/Dominance.cpp
@@ -127,7 +127,7 @@ bool DominanceInfo::properlyDominates(Operation *a, Operation *b) {
}
/// Return true if value A properly dominates operation B.
-bool DominanceInfo::properlyDominates(Value *a, Operation *b) {
+bool DominanceInfo::properlyDominates(ValuePtr a, Operation *b) {
if (auto *aOp = a->getDefiningOp()) {
// The values defined by an operation do *not* dominate any nested
// operations.
diff --git a/mlir/lib/Analysis/Liveness.cpp b/mlir/lib/Analysis/Liveness.cpp
index 6aaec4cc719..edb18e5645d 100644
--- a/mlir/lib/Analysis/Liveness.cpp
+++ b/mlir/lib/Analysis/Liveness.cpp
@@ -40,13 +40,13 @@ struct BlockInfoBuilder {
/// Fills the block builder with initial liveness information.
BlockInfoBuilder(Block *block) : block(block) {
// Mark all block arguments (phis) as defined.
- for (BlockArgument *argument : block->getArguments())
+ for (BlockArgumentPtr argument : block->getArguments())
defValues.insert(argument);
// Check all result values and whether their uses
// are inside this block or not (see outValues).
for (Operation &operation : *block)
- for (Value *result : operation.getResults()) {
+ for (ValuePtr result : operation.getResults()) {
defValues.insert(result);
// Check whether this value will be in the outValues
@@ -63,7 +63,7 @@ struct BlockInfoBuilder {
// Check all operations for used operands.
for (Operation &operation : block->getOperations())
- for (Value *operand : operation.getOperands()) {
+ for (ValuePtr operand : operation.getOperands()) {
// If the operand is already defined in the scope of this
// block, we can skip the value in the use set.
if (!defValues.count(operand))
@@ -173,7 +173,7 @@ void Liveness::build(MutableArrayRef<Region> regions) {
}
/// Gets liveness info (if any) for the given value.
-Liveness::OperationListT Liveness::resolveLiveness(Value *value) const {
+Liveness::OperationListT Liveness::resolveLiveness(ValuePtr value) const {
OperationListT result;
SmallPtrSet<Block *, 32> visited;
SmallVector<Block *, 8> toProcess;
@@ -238,7 +238,7 @@ const Liveness::ValueSetT &Liveness::getLiveOut(Block *block) const {
/// Returns true if the given operation represent the last use of the
/// given value.
-bool Liveness::isLastUse(Value *value, Operation *operation) const {
+bool Liveness::isLastUse(ValuePtr value, Operation *operation) const {
Block *block = operation->getBlock();
const LivenessBlockInfo *blockInfo = getLiveness(block);
@@ -263,21 +263,21 @@ void Liveness::print(raw_ostream &os) const {
// Builds unique block/value mappings for testing purposes.
DenseMap<Block *, size_t> blockIds;
DenseMap<Operation *, size_t> operationIds;
- DenseMap<Value *, size_t> valueIds;
+ DenseMap<ValuePtr, size_t> valueIds;
for (Region &region : operation->getRegions())
for (Block &block : region) {
blockIds.insert({&block, blockIds.size()});
- for (BlockArgument *argument : block.getArguments())
+ for (BlockArgumentPtr argument : block.getArguments())
valueIds.insert({argument, valueIds.size()});
for (Operation &operation : block) {
operationIds.insert({&operation, operationIds.size()});
- for (Value *result : operation.getResults())
+ for (ValuePtr result : operation.getResults())
valueIds.insert({result, valueIds.size()});
}
}
// Local printing helpers
- auto printValueRef = [&](Value *value) {
+ auto printValueRef = [&](ValuePtr value) {
if (Operation *defOp = value->getDefiningOp())
os << "val_" << defOp->getName();
else {
@@ -289,12 +289,12 @@ void Liveness::print(raw_ostream &os) const {
};
auto printValueRefs = [&](const ValueSetT &values) {
- std::vector<Value *> orderedValues(values.begin(), values.end());
+ std::vector<ValuePtr> orderedValues(values.begin(), values.end());
std::sort(orderedValues.begin(), orderedValues.end(),
- [&](Value *left, Value *right) {
+ [&](ValuePtr left, ValuePtr right) {
return valueIds[left] < valueIds[right];
});
- for (Value *value : orderedValues)
+ for (ValuePtr value : orderedValues)
printValueRef(value);
};
@@ -315,7 +315,7 @@ void Liveness::print(raw_ostream &os) const {
if (op.getNumResults() < 1)
continue;
os << "\n";
- for (Value *result : op.getResults()) {
+ for (ValuePtr result : op.getResults()) {
os << "// ";
printValueRef(result);
os << ":";
@@ -340,18 +340,18 @@ void Liveness::print(raw_ostream &os) const {
//===----------------------------------------------------------------------===//
/// Returns true if the given value is in the live-in set.
-bool LivenessBlockInfo::isLiveIn(Value *value) const {
+bool LivenessBlockInfo::isLiveIn(ValuePtr value) const {
return inValues.count(value);
}
/// Returns true if the given value is in the live-out set.
-bool LivenessBlockInfo::isLiveOut(Value *value) const {
+bool LivenessBlockInfo::isLiveOut(ValuePtr value) const {
return outValues.count(value);
}
/// Gets the start operation for the given value
/// (must be referenced in this block).
-Operation *LivenessBlockInfo::getStartOperation(Value *value) const {
+Operation *LivenessBlockInfo::getStartOperation(ValuePtr value) const {
Operation *definingOp = value->getDefiningOp();
// The given value is either live-in or is defined
// in the scope of this block.
@@ -362,7 +362,7 @@ Operation *LivenessBlockInfo::getStartOperation(Value *value) const {
/// Gets the end operation for the given value using the start operation
/// provided (must be referenced in this block).
-Operation *LivenessBlockInfo::getEndOperation(Value *value,
+Operation *LivenessBlockInfo::getEndOperation(ValuePtr value,
Operation *startOperation) const {
// The given value is either dying in this block or live-out.
if (isLiveOut(value))
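
The BlockInfoBuilder hunks above gather the per-block def and use sets that seed the liveness fixpoint: block arguments and op results are local defs, and any operand not defined in the block becomes a use. A minimal standalone sketch of that gen/kill collection, using hypothetical plain-C++ stand-ins (integer value ids) rather than the MLIR classes:

  #include <set>
  #include <vector>

  struct Op { std::vector<int> results, operands; };
  struct BlockSets { std::set<int> defs, uses; };

  BlockSets collect(const std::vector<int> &blockArgs, const std::vector<Op> &ops) {
    BlockSets s;
    for (int arg : blockArgs)          // block arguments (phis) are defined here
      s.defs.insert(arg);
    for (const Op &op : ops)           // every op result is a local def
      for (int result : op.results)
        s.defs.insert(result);
    for (const Op &op : ops)           // operands defined elsewhere are uses
      for (int operand : op.operands)
        if (!s.defs.count(operand))
          s.uses.insert(operand);
    return s;
  }
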
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index a81116579ce..9dfbfe0c542 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -43,7 +43,7 @@ using namespace mlir;
// be more powerful (since both inequalities and equalities will be considered).
void mlir::buildTripCountMapAndOperands(
AffineForOp forOp, AffineMap *tripCountMap,
- SmallVectorImpl<Value *> *tripCountOperands) {
+ SmallVectorImpl<ValuePtr> *tripCountOperands) {
int64_t loopSpan;
int64_t step = forOp.getStep();
@@ -65,8 +65,8 @@ void mlir::buildTripCountMapAndOperands(
*tripCountMap = AffineMap();
return;
}
- SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
// Difference of each upper bound expression from the single lower bound
// expression (divided by the step) provides the expressions for the trip
@@ -98,7 +98,7 @@ void mlir::buildTripCountMapAndOperands(
// works with analysis structures (FlatAffineConstraints) and thus doesn't
// update the IR.
Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
AffineMap map;
buildTripCountMapAndOperands(forOp, &map, &operands);
@@ -124,7 +124,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
/// expression analysis is used (indirectly through getTripCount), and
/// this method is thus able to determine non-trivial divisors.
uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
AffineMap map;
buildTripCountMapAndOperands(forOp, &map, &operands);
@@ -173,7 +173,7 @@ uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
///
/// Returns false in cases with more than one AffineApplyOp; this is
/// conservative.
-static bool isAccessIndexInvariant(Value *iv, Value *index) {
+static bool isAccessIndexInvariant(ValuePtr iv, ValuePtr index) {
assert(isForInductionVar(iv) && "iv must be a AffineForOp");
assert(index->getType().isa<IndexType>() && "index must be of IndexType");
SmallVector<Operation *, 4> affineApplyOps;
@@ -197,11 +197,11 @@ static bool isAccessIndexInvariant(Value *iv, Value *index) {
return !(AffineValueMap(composeOp).isFunctionOf(0, iv));
}
-DenseSet<Value *> mlir::getInvariantAccesses(Value *iv,
- ArrayRef<Value *> indices) {
- DenseSet<Value *> res;
+DenseSet<ValuePtr> mlir::getInvariantAccesses(ValuePtr iv,
+ ArrayRef<ValuePtr> indices) {
+ DenseSet<ValuePtr> res;
for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) {
- auto *val = indices[idx];
+ auto val = indices[idx];
if (isAccessIndexInvariant(iv, val)) {
res.insert(val);
}
@@ -229,7 +229,7 @@ DenseSet<Value *> mlir::getInvariantAccesses(Value *iv,
///
// TODO(ntv): check strides.
template <typename LoadOrStoreOp>
-static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
+static bool isContiguousAccess(ValuePtr iv, LoadOrStoreOp memoryOp,
int *memRefDim) {
static_assert(std::is_same<LoadOrStoreOp, AffineLoadOp>::value ||
std::is_same<LoadOrStoreOp, AffineStoreOp>::value,
@@ -250,11 +250,11 @@ static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
int uniqueVaryingIndexAlongIv = -1;
auto accessMap = memoryOp.getAffineMap();
- SmallVector<Value *, 4> mapOperands(memoryOp.getMapOperands());
+ SmallVector<ValuePtr, 4> mapOperands(memoryOp.getMapOperands());
unsigned numDims = accessMap.getNumDims();
for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
// Gather map operands used in result expr 'i' in 'exprOperands'.
- SmallVector<Value *, 4> exprOperands;
+ SmallVector<ValuePtr, 4> exprOperands;
auto resultExpr = accessMap.getResult(i);
resultExpr.walk([&](AffineExpr expr) {
if (auto dimExpr = expr.dyn_cast<AffineDimExpr>())
@@ -263,7 +263,7 @@ static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
});
// Check access invariance of each operand in 'exprOperands'.
- for (auto *exprOperand : exprOperands) {
+ for (auto exprOperand : exprOperands) {
if (!isAccessIndexInvariant(iv, exprOperand)) {
if (uniqueVaryingIndexAlongIv != -1) {
// 2+ varying indices -> do not vectorize along iv.
@@ -382,7 +382,7 @@ bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
// Validate the results of this operation if it were to be shifted.
for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
- Value *result = op.getResult(i);
+ ValuePtr result = op.getResult(i);
for (auto *user : result->getUsers()) {
// If an ancestor operation doesn't lie in the block of forOp,
// there is no shift to check.
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index 700321ebb40..b09bddddd66 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -104,8 +104,8 @@ static void getBackwardSliceImpl(Operation *op,
}
for (auto en : llvm::enumerate(op->getOperands())) {
- auto *operand = en.value();
- if (auto *blockArg = dyn_cast<BlockArgument>(operand)) {
+ auto operand = en.value();
+ if (auto blockArg = dyn_cast<BlockArgument>(operand)) {
if (auto affIv = getForInductionVarOwner(operand)) {
auto *affOp = affIv.getOperation();
if (backwardSlice->count(affOp) == 0)
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 3ba27bbb299..73aa07e7d7b 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -60,7 +60,7 @@ ComputationSliceState::getAsConstraints(FlatAffineConstraints *cst) {
// Adds operands (dst ivs and symbols) as symbols in 'cst'.
unsigned numSymbols = lbOperands[0].size();
- SmallVector<Value *, 4> values(ivs);
+ SmallVector<ValuePtr, 4> values(ivs);
// Append 'ivs' then 'operands' to 'values'.
values.append(lbOperands[0].begin(), lbOperands[0].end());
cst->reset(numDims, numSymbols, 0, values);
@@ -185,7 +185,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
if (rank == 0) {
SmallVector<AffineForOp, 4> ivs;
getLoopIVs(*op, &ivs);
- SmallVector<Value *, 8> regionSymbols;
+ SmallVector<ValuePtr, 8> regionSymbols;
extractForInductionVars(ivs, &regionSymbols);
// A rank 0 memref has a 0-d region.
cst.reset(rank, loopDepth, 0, regionSymbols);
@@ -201,7 +201,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
unsigned numSymbols = accessMap.getNumSymbols();
unsigned numOperands = accessValueMap.getNumOperands();
// Merge operands with slice operands.
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
operands.resize(numOperands);
for (unsigned i = 0; i < numOperands; ++i)
operands[i] = accessValueMap.getOperand(i);
@@ -224,7 +224,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
// Add equality constraints.
// Add inequalities for loop lower/upper bounds.
for (unsigned i = 0; i < numDims + numSymbols; ++i) {
- auto *operand = operands[i];
+ auto operand = operands[i];
if (auto loop = getForInductionVarOwner(operand)) {
// Note that cst can now have more dimensions than accessMap if the
// bounds expressions involve outer loops or other symbols.
@@ -234,7 +234,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
return failure();
} else {
// Has to be a valid symbol.
- auto *symbol = operand;
+ auto symbol = operand;
assert(isValidSymbol(symbol));
// Check if the symbol is a constant.
if (auto *op = symbol->getDefiningOp()) {
@@ -278,9 +278,9 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
getLoopIVs(*op, &enclosingIVs);
assert(loopDepth <= enclosingIVs.size() && "invalid loop depth");
enclosingIVs.resize(loopDepth);
- SmallVector<Value *, 4> ids;
+ SmallVector<ValuePtr, 4> ids;
cst.getIdValues(cst.getNumDimIds(), cst.getNumDimAndSymbolIds(), &ids);
- for (auto *id : ids) {
+ for (auto id : ids) {
AffineForOp iv;
if ((iv = getForInductionVarOwner(id)) &&
llvm::is_contained(enclosingIVs, iv) == false) {
@@ -345,9 +345,9 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
// Indices to use for the DmaStart op.
// Indices for the original memref being DMAed from/to.
- SmallVector<Value *, 4> memIndices;
+ SmallVector<ValuePtr, 4> memIndices;
// Indices for the faster buffer being DMAed into/from.
- SmallVector<Value *, 4> bufIndices;
+ SmallVector<ValuePtr, 4> bufIndices;
// Compute the extents of the buffer.
Optional<int64_t> numElements = getConstantBoundingSizeAndShape();
@@ -480,10 +480,10 @@ static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
}
// Adds loop IV bounds to 'cst' for loop IVs not found in 'ivs'.
-LogicalResult addMissingLoopIVBounds(SmallPtrSet<Value *, 8> &ivs,
+LogicalResult addMissingLoopIVBounds(SmallPtrSet<ValuePtr, 8> &ivs,
FlatAffineConstraints *cst) {
for (unsigned i = 0, e = cst->getNumDimIds(); i < e; ++i) {
- auto *value = cst->getIdValue(i);
+ auto value = cst->getIdValue(i);
if (ivs.count(value) == 0) {
assert(isForInductionVar(value));
auto loop = getForInductionVarOwner(value);
@@ -596,10 +596,10 @@ LogicalResult mlir::computeSliceUnion(ArrayRef<Operation *> opsA,
// Pre-constraint id alignment: record loop IVs used in each constraint
// system.
- SmallPtrSet<Value *, 8> sliceUnionIVs;
+ SmallPtrSet<ValuePtr, 8> sliceUnionIVs;
for (unsigned k = 0, l = sliceUnionCst.getNumDimIds(); k < l; ++k)
sliceUnionIVs.insert(sliceUnionCst.getIdValue(k));
- SmallPtrSet<Value *, 8> tmpSliceIVs;
+ SmallPtrSet<ValuePtr, 8> tmpSliceIVs;
for (unsigned k = 0, l = tmpSliceCst.getNumDimIds(); k < l; ++k)
tmpSliceIVs.insert(tmpSliceCst.getIdValue(k));
@@ -659,7 +659,7 @@ LogicalResult mlir::computeSliceUnion(ArrayRef<Operation *> opsA,
&sliceUnion->ubs);
// Add slice bound operands of union.
- SmallVector<Value *, 4> sliceBoundOperands;
+ SmallVector<ValuePtr, 4> sliceBoundOperands;
sliceUnionCst.getIdValues(numSliceLoopIVs,
sliceUnionCst.getNumDimAndSymbolIds(),
&sliceBoundOperands);
@@ -725,7 +725,7 @@ void mlir::getComputationSliceState(
&sliceState->lbs, &sliceState->ubs);
// Set up bound operands for the slice's lower and upper bounds.
- SmallVector<Value *, 4> sliceBoundOperands;
+ SmallVector<ValuePtr, 4> sliceBoundOperands;
unsigned numDimsAndSymbols = dependenceConstraints->getNumDimAndSymbolIds();
for (unsigned i = 0; i < numDimsAndSymbols; ++i) {
if (i < offset || i >= offset + numSliceLoopIVs) {
@@ -743,7 +743,7 @@ void mlir::getComputationSliceState(
isBackwardSlice ? dstLoopIVs[loopDepth - 1].getBody()->begin()
: std::prev(srcLoopIVs[loopDepth - 1].getBody()->end());
- llvm::SmallDenseSet<Value *, 8> sequentialLoops;
+ llvm::SmallDenseSet<ValuePtr, 8> sequentialLoops;
if (isa<AffineLoadOp>(depSourceOp) && isa<AffineLoadOp>(depSinkOp)) {
// For read-read access pairs, clear any slice bounds on sequential loops.
// Get sequential loops in loop nest rooted at 'srcLoopIVs[0]'.
@@ -758,7 +758,7 @@ void mlir::getComputationSliceState(
return isBackwardSlice ? srcLoopIVs[i] : dstLoopIVs[i];
};
for (unsigned i = 0; i < numSliceLoopIVs; ++i) {
- Value *iv = getSliceLoop(i).getInductionVar();
+ ValuePtr iv = getSliceLoop(i).getInductionVar();
if (sequentialLoops.count(iv) == 0 &&
getSliceLoop(i).getAttr(kSliceFusionBarrierAttrName) == nullptr)
continue;
@@ -846,7 +846,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
opInst = loadOrStoreOpInst;
auto loadMemrefType = loadOp.getMemRefType();
indices.reserve(loadMemrefType.getRank());
- for (auto *index : loadOp.getMapOperands()) {
+ for (auto index : loadOp.getMapOperands()) {
indices.push_back(index);
}
} else {
@@ -856,7 +856,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
memref = storeOp.getMemRef();
auto storeMemrefType = storeOp.getMemRefType();
indices.reserve(storeMemrefType.getRank());
- for (auto *index : storeOp.getMapOperands()) {
+ for (auto index : storeOp.getMapOperands()) {
indices.push_back(index);
}
}
@@ -919,7 +919,7 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,
Block::iterator start,
Block::iterator end,
int memorySpace) {
- SmallDenseMap<Value *, std::unique_ptr<MemRefRegion>, 4> regions;
+ SmallDenseMap<ValuePtr, std::unique_ptr<MemRefRegion>, 4> regions;
// Walk this 'affine.for' operation to gather all memory regions.
auto result = block.walk(start, end, [&](Operation *opInst) -> WalkResult {
@@ -970,7 +970,7 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
/// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
/// at 'forOp'.
void mlir::getSequentialLoops(
- AffineForOp forOp, llvm::SmallDenseSet<Value *, 8> *sequentialLoops) {
+ AffineForOp forOp, llvm::SmallDenseSet<ValuePtr, 8> *sequentialLoops) {
forOp.getOperation()->walk([&](Operation *op) {
if (auto innerFor = dyn_cast<AffineForOp>(op))
if (!isLoopParallel(innerFor))
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index 42d3f10b14c..a7917eba503 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -109,7 +109,7 @@ Optional<SmallVector<int64_t, 4>> mlir::shapeRatio(VectorType superVectorType,
/// Examples can be found in the documentation of `makePermutationMap`, in the
/// header file.
static AffineMap makePermutationMap(
- ArrayRef<Value *> indices,
+ ArrayRef<ValuePtr> indices,
const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim) {
if (enclosingLoopToVectorDim.empty())
return AffineMap();
@@ -167,7 +167,7 @@ static SetVector<Operation *> getEnclosingforOps(Operation *op) {
}
AffineMap mlir::makePermutationMap(
- Operation *op, ArrayRef<Value *> indices,
+ Operation *op, ArrayRef<ValuePtr> indices,
const DenseMap<Operation *, unsigned> &loopToVectorDim) {
DenseMap<Operation *, unsigned> enclosingLoopToVectorDim;
auto enclosingLoops = getEnclosingforOps(op);
diff --git a/mlir/lib/Analysis/Verifier.cpp b/mlir/lib/Analysis/Verifier.cpp
index 82f5aa5e01c..be499a93898 100644
--- a/mlir/lib/Analysis/Verifier.cpp
+++ b/mlir/lib/Analysis/Verifier.cpp
@@ -138,7 +138,7 @@ LogicalResult OperationVerifier::verifyRegion(Region &region) {
}
LogicalResult OperationVerifier::verifyBlock(Block &block) {
- for (auto *arg : block.getArguments())
+ for (auto arg : block.getArguments())
if (arg->getOwner() != &block)
return emitError(block, "block argument not owned by block");
@@ -175,7 +175,7 @@ LogicalResult OperationVerifier::verifyBlock(Block &block) {
LogicalResult OperationVerifier::verifyOperation(Operation &op) {
// Check that operands are non-nil and structurally ok.
- for (auto *operand : op.getOperands())
+ for (auto operand : op.getOperands())
if (!operand)
return op.emitError("null operand found");
@@ -244,7 +244,7 @@ LogicalResult OperationVerifier::verifyDominance(Operation &op) {
// Check that operands properly dominate this use.
for (unsigned operandNo = 0, e = op.getNumOperands(); operandNo != e;
++operandNo) {
- auto *operand = op.getOperand(operandNo);
+ auto operand = op.getOperand(operandNo);
if (domInfo->properlyDominates(operand, &op))
continue;
diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index 3f613c6bfb5..144b4a97e87 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -42,16 +42,16 @@ namespace {
// that correspond to it. Visitation functions return a Value of the
// expression subtree they visited or `nullptr` on error.
class AffineApplyExpander
- : public AffineExprVisitor<AffineApplyExpander, Value *> {
+ : public AffineExprVisitor<AffineApplyExpander, ValuePtr> {
public:
// This internal class expects arguments to be non-null, checks must be
// performed at the call site.
- AffineApplyExpander(OpBuilder &builder, ArrayRef<Value *> dimValues,
- ArrayRef<Value *> symbolValues, Location loc)
+ AffineApplyExpander(OpBuilder &builder, ArrayRef<ValuePtr> dimValues,
+ ArrayRef<ValuePtr> symbolValues, Location loc)
: builder(builder), dimValues(dimValues), symbolValues(symbolValues),
loc(loc) {}
- template <typename OpTy> Value *buildBinaryExpr(AffineBinaryOpExpr expr) {
+ template <typename OpTy> ValuePtr buildBinaryExpr(AffineBinaryOpExpr expr) {
auto lhs = visit(expr.getLHS());
auto rhs = visit(expr.getRHS());
if (!lhs || !rhs)
@@ -60,11 +60,11 @@ public:
return op.getResult();
}
- Value *visitAddExpr(AffineBinaryOpExpr expr) {
+ ValuePtr visitAddExpr(AffineBinaryOpExpr expr) {
return buildBinaryExpr<AddIOp>(expr);
}
- Value *visitMulExpr(AffineBinaryOpExpr expr) {
+ ValuePtr visitMulExpr(AffineBinaryOpExpr expr) {
return buildBinaryExpr<MulIOp>(expr);
}
@@ -77,7 +77,7 @@ public:
// let remainder = srem a, b;
// negative = a < 0 in
// select negative, remainder + b, remainder.
- Value *visitModExpr(AffineBinaryOpExpr expr) {
+ ValuePtr visitModExpr(AffineBinaryOpExpr expr) {
auto rhsConst = expr.getRHS().dyn_cast<AffineConstantExpr>();
if (!rhsConst) {
emitError(
@@ -94,13 +94,13 @@ public:
auto rhs = visit(expr.getRHS());
assert(lhs && rhs && "unexpected affine expr lowering failure");
- Value *remainder = builder.create<SignedRemIOp>(loc, lhs, rhs);
- Value *zeroCst = builder.create<ConstantIndexOp>(loc, 0);
- Value *isRemainderNegative =
+ ValuePtr remainder = builder.create<SignedRemIOp>(loc, lhs, rhs);
+ ValuePtr zeroCst = builder.create<ConstantIndexOp>(loc, 0);
+ ValuePtr isRemainderNegative =
builder.create<CmpIOp>(loc, CmpIPredicate::slt, remainder, zeroCst);
- Value *correctedRemainder = builder.create<AddIOp>(loc, remainder, rhs);
- Value *result = builder.create<SelectOp>(loc, isRemainderNegative,
- correctedRemainder, remainder);
+ ValuePtr correctedRemainder = builder.create<AddIOp>(loc, remainder, rhs);
+ ValuePtr result = builder.create<SelectOp>(loc, isRemainderNegative,
+ correctedRemainder, remainder);
return result;
}
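
For a positive constant divisor, the select-based sequence above produces a non-negative remainder. The same arithmetic as a standalone sketch (assuming b > 0, which the lowering enforces), not the generated IR:

  #include <cstdint>

  // a mod b, mirroring: remainder = srem(a, b);
  //                     select(remainder < 0, remainder + b, remainder)
  int64_t lowerMod(int64_t a, int64_t b) {
    int64_t remainder = a % b;         // srem: result takes the sign of a
    return remainder < 0 ? remainder + b : remainder;
  }
  // e.g. lowerMod(-7, 3) == 2, whereas -7 % 3 == -1.
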
@@ -114,7 +114,7 @@ public:
// let absolute = negative ? -a - 1 : a in
// let quotient = absolute / b in
// negative ? -quotient - 1 : quotient
- Value *visitFloorDivExpr(AffineBinaryOpExpr expr) {
+ ValuePtr visitFloorDivExpr(AffineBinaryOpExpr expr) {
auto rhsConst = expr.getRHS().dyn_cast<AffineConstantExpr>();
if (!rhsConst) {
emitError(
@@ -131,16 +131,16 @@ public:
auto rhs = visit(expr.getRHS());
assert(lhs && rhs && "unexpected affine expr lowering failure");
- Value *zeroCst = builder.create<ConstantIndexOp>(loc, 0);
- Value *noneCst = builder.create<ConstantIndexOp>(loc, -1);
- Value *negative =
+ ValuePtr zeroCst = builder.create<ConstantIndexOp>(loc, 0);
+ ValuePtr noneCst = builder.create<ConstantIndexOp>(loc, -1);
+ ValuePtr negative =
builder.create<CmpIOp>(loc, CmpIPredicate::slt, lhs, zeroCst);
- Value *negatedDecremented = builder.create<SubIOp>(loc, noneCst, lhs);
- Value *dividend =
+ ValuePtr negatedDecremented = builder.create<SubIOp>(loc, noneCst, lhs);
+ ValuePtr dividend =
builder.create<SelectOp>(loc, negative, negatedDecremented, lhs);
- Value *quotient = builder.create<SignedDivIOp>(loc, dividend, rhs);
- Value *correctedQuotient = builder.create<SubIOp>(loc, noneCst, quotient);
- Value *result =
+ ValuePtr quotient = builder.create<SignedDivIOp>(loc, dividend, rhs);
+ ValuePtr correctedQuotient = builder.create<SubIOp>(loc, noneCst, quotient);
+ ValuePtr result =
builder.create<SelectOp>(loc, negative, correctedQuotient, quotient);
return result;
}
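
The floordiv sequence above rounds toward negative infinity by routing negative dividends through -a - 1. A standalone sketch of the arithmetic (assuming b > 0):

  #include <cstdint>

  int64_t lowerFloorDiv(int64_t a, int64_t b) {
    bool negative = a < 0;
    int64_t absolute = negative ? -a - 1 : a;
    int64_t quotient = absolute / b;   // sdiv truncates toward zero
    return negative ? -quotient - 1 : quotient;
  }
  // e.g. lowerFloorDiv(-5, 4) == -2 while -5 / 4 truncates to -1.
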
@@ -155,7 +155,7 @@ public:
// let absolute = negative ? -a : a - 1 in
// let quotient = absolute / b in
// negative ? -quotient : quotient + 1
- Value *visitCeilDivExpr(AffineBinaryOpExpr expr) {
+ ValuePtr visitCeilDivExpr(AffineBinaryOpExpr expr) {
auto rhsConst = expr.getRHS().dyn_cast<AffineConstantExpr>();
if (!rhsConst) {
emitError(loc) << "semi-affine expressions (division by non-const) are "
@@ -170,23 +170,24 @@ public:
auto rhs = visit(expr.getRHS());
assert(lhs && rhs && "unexpected affine expr lowering failure");
- Value *zeroCst = builder.create<ConstantIndexOp>(loc, 0);
- Value *oneCst = builder.create<ConstantIndexOp>(loc, 1);
- Value *nonPositive =
+ ValuePtr zeroCst = builder.create<ConstantIndexOp>(loc, 0);
+ ValuePtr oneCst = builder.create<ConstantIndexOp>(loc, 1);
+ ValuePtr nonPositive =
builder.create<CmpIOp>(loc, CmpIPredicate::sle, lhs, zeroCst);
- Value *negated = builder.create<SubIOp>(loc, zeroCst, lhs);
- Value *decremented = builder.create<SubIOp>(loc, lhs, oneCst);
- Value *dividend =
+ ValuePtr negated = builder.create<SubIOp>(loc, zeroCst, lhs);
+ ValuePtr decremented = builder.create<SubIOp>(loc, lhs, oneCst);
+ ValuePtr dividend =
builder.create<SelectOp>(loc, nonPositive, negated, decremented);
- Value *quotient = builder.create<SignedDivIOp>(loc, dividend, rhs);
- Value *negatedQuotient = builder.create<SubIOp>(loc, zeroCst, quotient);
- Value *incrementedQuotient = builder.create<AddIOp>(loc, quotient, oneCst);
- Value *result = builder.create<SelectOp>(loc, nonPositive, negatedQuotient,
- incrementedQuotient);
+ ValuePtr quotient = builder.create<SignedDivIOp>(loc, dividend, rhs);
+ ValuePtr negatedQuotient = builder.create<SubIOp>(loc, zeroCst, quotient);
+ ValuePtr incrementedQuotient =
+ builder.create<AddIOp>(loc, quotient, oneCst);
+ ValuePtr result = builder.create<SelectOp>(
+ loc, nonPositive, negatedQuotient, incrementedQuotient);
return result;
}
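
And the matching sketch for the ceildiv sequence above (again assuming b > 0): non-positive dividends are negated, positive ones are decremented, so the truncating division lands on the rounded-up result:

  #include <cstdint>

  int64_t lowerCeilDiv(int64_t a, int64_t b) {
    bool nonPositive = a <= 0;
    int64_t absolute = nonPositive ? -a : a - 1;
    int64_t quotient = absolute / b;
    return nonPositive ? -quotient : quotient + 1;
  }
  // e.g. lowerCeilDiv(5, 4) == 2 and lowerCeilDiv(-5, 4) == -1.
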
- Value *visitConstantExpr(AffineConstantExpr expr) {
+ ValuePtr visitConstantExpr(AffineConstantExpr expr) {
auto valueAttr =
builder.getIntegerAttr(builder.getIndexType(), expr.getValue());
auto op =
@@ -194,13 +195,13 @@ public:
return op.getResult();
}
- Value *visitDimExpr(AffineDimExpr expr) {
+ ValuePtr visitDimExpr(AffineDimExpr expr) {
assert(expr.getPosition() < dimValues.size() &&
"affine dim position out of range");
return dimValues[expr.getPosition()];
}
- Value *visitSymbolExpr(AffineSymbolExpr expr) {
+ ValuePtr visitSymbolExpr(AffineSymbolExpr expr) {
assert(expr.getPosition() < symbolValues.size() &&
"symbol dim position out of range");
return symbolValues[expr.getPosition()];
@@ -208,8 +209,8 @@ public:
private:
OpBuilder &builder;
- ArrayRef<Value *> dimValues;
- ArrayRef<Value *> symbolValues;
+ ArrayRef<ValuePtr> dimValues;
+ ArrayRef<ValuePtr> symbolValues;
Location loc;
};
@@ -217,18 +218,18 @@ private:
// Create a sequence of operations that implement the `expr` applied to the
// given dimension and symbol values.
-mlir::Value *mlir::expandAffineExpr(OpBuilder &builder, Location loc,
- AffineExpr expr,
- ArrayRef<Value *> dimValues,
- ArrayRef<Value *> symbolValues) {
+mlir::ValuePtr mlir::expandAffineExpr(OpBuilder &builder, Location loc,
+ AffineExpr expr,
+ ArrayRef<ValuePtr> dimValues,
+ ArrayRef<ValuePtr> symbolValues) {
return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
}
// Create a sequence of operations that implement the `affineMap` applied to
// the given `operands` (as if it were an AffineApplyOp).
-Optional<SmallVector<Value *, 8>> static expandAffineMap(
+Optional<SmallVector<ValuePtr, 8>> static expandAffineMap(
OpBuilder &builder, Location loc, AffineMap affineMap,
- ArrayRef<Value *> operands) {
+ ArrayRef<ValuePtr> operands) {
auto numDims = affineMap.getNumDims();
auto expanded = functional::map(
[numDims, &builder, loc, operands](AffineExpr expr) {
@@ -237,7 +238,7 @@ Optional<SmallVector<Value *, 8>> static expandAffineMap(
operands.drop_front(numDims));
},
affineMap.getResults());
- if (llvm::all_of(expanded, [](Value *v) { return v; }))
+ if (llvm::all_of(expanded, [](ValuePtr v) { return v; }))
return expanded;
return None;
}
@@ -253,13 +254,13 @@ Optional<SmallVector<Value *, 8>> static expandAffineMap(
// Multiple values are scanned in a linear sequence. This creates a data
// dependence chain that wouldn't exist in a tree reduction, but is easier to
// recognize as a reduction by the subsequent passes.
-static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
- ArrayRef<Value *> values,
- OpBuilder &builder) {
+static ValuePtr buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
+ ArrayRef<ValuePtr> values,
+ OpBuilder &builder) {
assert(!llvm::empty(values) && "empty min/max chain");
auto valueIt = values.begin();
- Value *value = *valueIt++;
+ ValuePtr value = *valueIt++;
for (; valueIt != values.end(); ++valueIt) {
auto cmpOp = builder.create<CmpIOp>(loc, predicate, value, *valueIt);
value = builder.create<SelectOp>(loc, cmpOp.getResult(), value, *valueIt);
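
The shape of that cmp/select chain, shown on plain integers (a hedged sketch of the dataflow, not the builder calls): one comparison and one select per additional value, folded left to right.

  #include <cstdint>
  #include <vector>

  int64_t reduceSeq(const std::vector<int64_t> &values, bool takeMax) {
    int64_t value = values.front();                // assumes a non-empty chain
    for (size_t i = 1; i < values.size(); ++i) {
      bool pred = takeMax ? value > values[i] : value < values[i];
      value = pred ? value : values[i];            // the "select"
    }
    return value;
  }
  // lowerAffineLowerBound takes the maximum of the expanded lower-bound
  // results; lowerAffineUpperBound takes the minimum.
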
@@ -271,8 +272,8 @@ static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
// Emit instructions that correspond to the affine map in the lower bound
// applied to the respective operands, and compute the maximum value across
// the results.
-Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
- SmallVector<Value *, 8> boundOperands(op.getLowerBoundOperands());
+ValuePtr mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
+ SmallVector<ValuePtr, 8> boundOperands(op.getLowerBoundOperands());
auto lbValues = expandAffineMap(builder, op.getLoc(), op.getLowerBoundMap(),
boundOperands);
if (!lbValues)
@@ -284,8 +285,8 @@ Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
// Emit instructions that correspond to the affine map in the upper bound
// applied to the respective operands, and compute the minimum value across
// the results.
-Value *mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) {
- SmallVector<Value *, 8> boundOperands(op.getUpperBoundOperands());
+ValuePtr mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) {
+ SmallVector<ValuePtr, 8> boundOperands(op.getUpperBoundOperands());
auto ubValues = expandAffineMap(builder, op.getLoc(), op.getUpperBoundMap(),
boundOperands);
if (!ubValues)
@@ -314,9 +315,9 @@ public:
PatternMatchResult matchAndRewrite(AffineForOp op,
PatternRewriter &rewriter) const override {
Location loc = op.getLoc();
- Value *lowerBound = lowerAffineLowerBound(op, rewriter);
- Value *upperBound = lowerAffineUpperBound(op, rewriter);
- Value *step = rewriter.create<ConstantIndexOp>(loc, op.getStep());
+ ValuePtr lowerBound = lowerAffineLowerBound(op, rewriter);
+ ValuePtr upperBound = lowerAffineUpperBound(op, rewriter);
+ ValuePtr step = rewriter.create<ConstantIndexOp>(loc, op.getStep());
auto f = rewriter.create<loop::ForOp>(loc, lowerBound, upperBound, step);
f.region().getBlocks().clear();
rewriter.inlineRegionBefore(op.region(), f.region(), f.region().end());
@@ -335,25 +336,25 @@ public:
// Now we just have to handle the condition logic.
auto integerSet = op.getIntegerSet();
- Value *zeroConstant = rewriter.create<ConstantIndexOp>(loc, 0);
- SmallVector<Value *, 8> operands(op.getOperands());
+ ValuePtr zeroConstant = rewriter.create<ConstantIndexOp>(loc, 0);
+ SmallVector<ValuePtr, 8> operands(op.getOperands());
auto operandsRef = llvm::makeArrayRef(operands);
// Calculate cond as a conjunction without short-circuiting.
- Value *cond = nullptr;
+ ValuePtr cond = nullptr;
for (unsigned i = 0, e = integerSet.getNumConstraints(); i < e; ++i) {
AffineExpr constraintExpr = integerSet.getConstraint(i);
bool isEquality = integerSet.isEq(i);
// Build and apply an affine expression
auto numDims = integerSet.getNumDims();
- Value *affResult = expandAffineExpr(rewriter, loc, constraintExpr,
- operandsRef.take_front(numDims),
- operandsRef.drop_front(numDims));
+ ValuePtr affResult = expandAffineExpr(rewriter, loc, constraintExpr,
+ operandsRef.take_front(numDims),
+ operandsRef.drop_front(numDims));
if (!affResult)
return matchFailure();
auto pred = isEquality ? CmpIPredicate::eq : CmpIPredicate::sge;
- Value *cmpVal =
+ ValuePtr cmpVal =
rewriter.create<CmpIOp>(loc, pred, affResult, zeroConstant);
cond =
cond ? rewriter.create<AndOp>(loc, cond, cmpVal).getResult() : cmpVal;
@@ -404,7 +405,7 @@ public:
PatternMatchResult matchAndRewrite(AffineLoadOp op,
PatternRewriter &rewriter) const override {
// Expand affine map from 'affineLoadOp'.
- SmallVector<Value *, 8> indices(op.getMapOperands());
+ SmallVector<ValuePtr, 8> indices(op.getMapOperands());
auto resultOperands =
expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
if (!resultOperands)
@@ -426,7 +427,7 @@ public:
PatternMatchResult matchAndRewrite(AffinePrefetchOp op,
PatternRewriter &rewriter) const override {
// Expand affine map from 'affinePrefetchOp'.
- SmallVector<Value *, 8> indices(op.getMapOperands());
+ SmallVector<ValuePtr, 8> indices(op.getMapOperands());
auto resultOperands =
expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
if (!resultOperands)
@@ -450,7 +451,7 @@ public:
PatternMatchResult matchAndRewrite(AffineStoreOp op,
PatternRewriter &rewriter) const override {
// Expand affine map from 'affineStoreOp'.
- SmallVector<Value *, 8> indices(op.getMapOperands());
+ SmallVector<ValuePtr, 8> indices(op.getMapOperands());
auto maybeExpandedMap =
expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
if (!maybeExpandedMap)
@@ -472,7 +473,7 @@ public:
PatternMatchResult matchAndRewrite(AffineDmaStartOp op,
PatternRewriter &rewriter) const override {
- SmallVector<Value *, 8> operands(op.getOperands());
+ SmallVector<ValuePtr, 8> operands(op.getOperands());
auto operandsRef = llvm::makeArrayRef(operands);
// Expand affine map for DMA source memref.
@@ -513,7 +514,7 @@ public:
PatternMatchResult matchAndRewrite(AffineDmaWaitOp op,
PatternRewriter &rewriter) const override {
// Expand affine map for DMA tag memref.
- SmallVector<Value *, 8> indices(op.getTagIndices());
+ SmallVector<ValuePtr, 8> indices(op.getTagIndices());
auto maybeExpandedTagMap =
expandAffineMap(rewriter, op.getLoc(), op.getTagMap(), indices);
if (!maybeExpandedTagMap)
diff --git a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
index 6a1a580e369..a408ab5b5d9 100644
--- a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
@@ -57,11 +57,11 @@ public:
// Convert the kernel arguments to an LLVM type, preserve the rest.
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto dialect = lowering.getDialect();
- Value *newOp;
+ ValuePtr newOp;
switch (dimensionToIndex(cast<Op>(op))) {
case X:
newOp = rewriter.create<XOp>(loc, LLVM::LLVMType::getInt32Ty(dialect));
diff --git a/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h b/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
index 23bfa303708..3ab8e75633e 100644
--- a/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
@@ -44,7 +44,7 @@ public:
f32Func(f32Func), f64Func(f64Func) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
using LLVM::LLVMFuncOp;
using LLVM::LLVMType;
@@ -69,10 +69,10 @@ public:
private:
LLVM::LLVMType getFunctionType(LLVM::LLVMType resultType,
- ArrayRef<Value *> operands) const {
+ ArrayRef<ValuePtr> operands) const {
using LLVM::LLVMType;
SmallVector<LLVMType, 1> operandTypes;
- for (Value *operand : operands) {
+ for (ValuePtr operand : operands) {
operandTypes.push_back(operand->getType().cast<LLVMType>());
}
return LLVMType::getFunctionTy(resultType, operandTypes,
diff --git a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
index f342083bee7..840ad6ba701 100644
--- a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
@@ -114,7 +114,7 @@ private:
}
// Allocate a void pointer on the stack.
- Value *allocatePointer(OpBuilder &builder, Location loc) {
+ ValuePtr allocatePointer(OpBuilder &builder, Location loc) {
auto one = builder.create<LLVM::ConstantOp>(loc, getInt32Type(),
builder.getI32IntegerAttr(1));
return builder.create<LLVM::AllocaOp>(loc, getPointerPointerType(), one,
@@ -122,9 +122,9 @@ private:
}
void declareCudaFunctions(Location loc);
- Value *setupParamsArray(gpu::LaunchFuncOp launchOp, OpBuilder &builder);
- Value *generateKernelNameConstant(StringRef name, Location loc,
- OpBuilder &builder);
+ ValuePtr setupParamsArray(gpu::LaunchFuncOp launchOp, OpBuilder &builder);
+ ValuePtr generateKernelNameConstant(StringRef name, Location loc,
+ OpBuilder &builder);
void translateGpuLaunchCalls(mlir::gpu::LaunchFuncOp launchOp);
public:
@@ -248,7 +248,7 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
// for (i : [0, NumKernelOperands))
// %array[i] = cast<void*>(KernelOperand[i])
// return %array
-Value *
+ValuePtr
GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp,
OpBuilder &builder) {
auto numKernelOperands = launchOp.getNumKernelOperands();
@@ -264,7 +264,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp,
for (unsigned idx = 0; idx < numKernelOperands; ++idx) {
auto operand = launchOp.getKernelOperand(idx);
auto llvmType = operand->getType().cast<LLVM::LLVMType>();
- Value *memLocation = builder.create<LLVM::AllocaOp>(
+ ValuePtr memLocation = builder.create<LLVM::AllocaOp>(
loc, llvmType.getPointerTo(), one, /*alignment=*/1);
builder.create<LLVM::StoreOp>(loc, operand, memLocation);
auto casted =
@@ -280,12 +280,12 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp,
getModule().lookupSymbol<LLVM::LLVMFuncOp>(kMcuMemHostRegister);
auto nullPtr = builder.create<LLVM::NullOp>(loc, llvmType.getPointerTo());
auto gep = builder.create<LLVM::GEPOp>(loc, llvmType.getPointerTo(),
- ArrayRef<Value *>{nullPtr, one});
+ ArrayRef<ValuePtr>{nullPtr, one});
auto size = builder.create<LLVM::PtrToIntOp>(loc, getInt64Type(), gep);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
builder.getSymbolRefAttr(registerFunc),
- ArrayRef<Value *>{casted, size});
- Value *memLocation = builder.create<LLVM::AllocaOp>(
+ ArrayRef<ValuePtr>{casted, size});
+ ValuePtr memLocation = builder.create<LLVM::AllocaOp>(
loc, getPointerPointerType(), one, /*alignment=*/1);
builder.create<LLVM::StoreOp>(loc, casted, memLocation);
casted =
@@ -295,7 +295,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp,
auto index = builder.create<LLVM::ConstantOp>(
loc, getInt32Type(), builder.getI32IntegerAttr(idx));
auto gep = builder.create<LLVM::GEPOp>(loc, getPointerPointerType(), array,
- ArrayRef<Value *>{index});
+ ArrayRef<ValuePtr>{index});
builder.create<LLVM::StoreOp>(loc, casted, gep);
}
return array;
@@ -311,7 +311,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp,
// %1 = llvm.constant (0 : index)
// %2 = llvm.getelementptr %0[%1, %1] : !llvm<"i8*">
// }
-Value *GpuLaunchFuncToCudaCallsPass::generateKernelNameConstant(
+ValuePtr GpuLaunchFuncToCudaCallsPass::generateKernelNameConstant(
StringRef name, Location loc, OpBuilder &builder) {
// Make sure the trailing zero is included in the constant.
std::vector<char> kernelName(name.begin(), name.end());
@@ -367,7 +367,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
assert(kernelModule.getName() && "expected a named module");
SmallString<128> nameBuffer(*kernelModule.getName());
nameBuffer.append(kCubinStorageSuffix);
- Value *data = LLVM::createGlobalString(
+ ValuePtr data = LLVM::createGlobalString(
loc, builder, nameBuffer.str(), cubinAttr.getValue(),
LLVM::Linkage::Internal, getLLVMDialect());
@@ -378,7 +378,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuModuleLoadName);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuModuleLoad),
- ArrayRef<Value *>{cuModule, data});
+ ArrayRef<ValuePtr>{cuModule, data});
// Get the function from the module. The name corresponds to the name of
// the kernel function.
auto cuOwningModuleRef =
@@ -390,13 +390,13 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuModuleGetFunction),
- ArrayRef<Value *>{cuFunction, cuOwningModuleRef, kernelName});
+ ArrayRef<ValuePtr>{cuFunction, cuOwningModuleRef, kernelName});
// Grab the global stream needed for execution.
auto cuGetStreamHelper =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuGetStreamHelperName);
auto cuStream = builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getPointerType()},
- builder.getSymbolRefAttr(cuGetStreamHelper), ArrayRef<Value *>{});
+ builder.getSymbolRefAttr(cuGetStreamHelper), ArrayRef<ValuePtr>{});
// Invoke the function with required arguments.
auto cuLaunchKernel =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuLaunchKernelName);
@@ -408,19 +408,19 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuLaunchKernel),
- ArrayRef<Value *>{cuFunctionRef, launchOp.getOperand(0),
- launchOp.getOperand(1), launchOp.getOperand(2),
- launchOp.getOperand(3), launchOp.getOperand(4),
- launchOp.getOperand(5), zero, /* sharedMemBytes */
- cuStream.getResult(0), /* stream */
- paramsArray, /* kernel params */
- nullpointer /* extra */});
+ ArrayRef<ValuePtr>{cuFunctionRef, launchOp.getOperand(0),
+ launchOp.getOperand(1), launchOp.getOperand(2),
+ launchOp.getOperand(3), launchOp.getOperand(4),
+ launchOp.getOperand(5), zero, /* sharedMemBytes */
+ cuStream.getResult(0), /* stream */
+ paramsArray, /* kernel params */
+ nullpointer /* extra */});
// Sync on the stream to make it synchronous.
auto cuStreamSync =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuStreamSynchronizeName);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuStreamSync),
- ArrayRef<Value *>(cuStream.getResult(0)));
+ ArrayRef<ValuePtr>(cuStream.getResult(0)));
launchOp.erase();
}
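
setupParamsArray above assembles the void** block that cuLaunchKernel expects: every kernel operand is stored to a stack slot and the slot's address, cast to void*, is written into the array. Its host-side analogue, as a hedged sketch against the CUDA driver API with made-up argument names:

  // Hypothetical kernel taking (float scale, CUdeviceptr buffer, int n).
  float scale = 2.0f;
  CUdeviceptr buffer = /* allocated elsewhere */ 0;
  int n = 1024;
  void *params[] = {&scale, &buffer, &n};          // one pointer per argument
  cuLaunchKernel(function, gridX, gridY, gridZ, blockX, blockY, blockZ,
                 /*sharedMemBytes=*/0, stream, params, /*extra=*/nullptr);
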
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 220df53b977..bf18ea03dab 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -60,8 +60,8 @@ public:
/// Converts all_reduce op to LLVM/NVVM ops.
struct GPUAllReduceOpLowering : public LLVMOpLowering {
- using AccumulatorFactory = std::function<Value *(
- Location, Value *, Value *, ConversionPatternRewriter &)>;
+ using AccumulatorFactory = std::function<ValuePtr(
+ Location, ValuePtr, ValuePtr, ConversionPatternRewriter &)>;
explicit GPUAllReduceOpLowering(LLVMTypeConverter &lowering_)
: LLVMOpLowering(gpu::AllReduceOp::getOperationName(),
@@ -69,10 +69,10 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering {
int32Type(LLVM::LLVMType::getInt32Ty(lowering_.getDialect())) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
Location loc = op->getLoc();
- Value *operand = operands.front();
+ ValuePtr operand = operands.front();
// TODO(csigg): Generalize to other types of accumulation.
assert(op->getOperand(0)->getType().isIntOrFloat());
@@ -81,7 +81,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering {
AccumulatorFactory factory =
getFactory(cast<gpu::AllReduceOp>(op), operand);
assert(factory && "failed to create accumulator factory");
- Value *result = createBlockReduce(loc, operand, factory, rewriter);
+ ValuePtr result = createBlockReduce(loc, operand, factory, rewriter);
rewriter.replaceOp(op, {result});
return matchSuccess();
@@ -91,7 +91,7 @@ private:
/// Returns an accumulator factory using either the op attribute or the body
/// region.
AccumulatorFactory getFactory(gpu::AllReduceOp allReduce,
- Value *operand) const {
+ ValuePtr operand) const {
if (!allReduce.body().empty()) {
return getFactory(allReduce.body());
}
@@ -106,7 +106,7 @@ private:
/// block is expected to have 2 arguments. The gpu.yield return the
/// accumulated value of the same type.
AccumulatorFactory getFactory(Region &body) const {
- return AccumulatorFactory([&](Location loc, Value *lhs, Value *rhs,
+ return AccumulatorFactory([&](Location loc, ValuePtr lhs, ValuePtr rhs,
ConversionPatternRewriter &rewriter) {
Block *block = rewriter.getInsertionBlock();
Block *split = rewriter.splitBlock(block, rewriter.getInsertionPoint());
@@ -120,7 +120,7 @@ private:
// Add branch before inserted body, into body.
block = block->getNextNode();
- rewriter.create<LLVM::BrOp>(loc, ArrayRef<Value *>{},
+ rewriter.create<LLVM::BrOp>(loc, ArrayRef<ValuePtr>{},
llvm::makeArrayRef(block), ValueRange());
// Replace all gpu.yield ops with branch out of body.
@@ -130,7 +130,7 @@ private:
continue;
rewriter.setInsertionPointToEnd(block);
rewriter.replaceOpWithNewOp<LLVM::BrOp>(
- terminator, ArrayRef<Value *>{}, llvm::makeArrayRef(split),
+ terminator, ArrayRef<ValuePtr>{}, llvm::makeArrayRef(split),
ValueRange(terminator->getOperand(0)));
}
@@ -161,7 +161,7 @@ private:
/// Returns an accumulator factory that creates an op of type T.
template <typename T> AccumulatorFactory getFactory() const {
- return [](Location loc, Value *lhs, Value *rhs,
+ return [](Location loc, ValuePtr lhs, ValuePtr rhs,
ConversionPatternRewriter &rewriter) {
return rewriter.create<T>(loc, lhs->getType(), lhs, rhs);
};
@@ -203,60 +203,60 @@ private:
/// %result = llvm.load %result_ptr
/// return %result
///
- Value *createBlockReduce(Location loc, Value *operand,
- AccumulatorFactory &accumFactory,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr createBlockReduce(Location loc, ValuePtr operand,
+ AccumulatorFactory &accumFactory,
+ ConversionPatternRewriter &rewriter) const {
auto type = operand->getType().cast<LLVM::LLVMType>();
// Create shared memory array to store the warp reduction.
auto module = operand->getDefiningOp()->getParentOfType<ModuleOp>();
assert(module && "op must belong to a module");
- Value *sharedMemPtr =
+ ValuePtr sharedMemPtr =
createSharedMemoryArray(loc, module, type, kWarpSize, rewriter);
- Value *zero = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr zero = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(0u));
- Value *laneId = rewriter.create<NVVM::LaneIdOp>(loc, int32Type);
- Value *isFirstLane = rewriter.create<LLVM::ICmpOp>(
+ ValuePtr laneId = rewriter.create<NVVM::LaneIdOp>(loc, int32Type);
+ ValuePtr isFirstLane = rewriter.create<LLVM::ICmpOp>(
loc, LLVM::ICmpPredicate::eq, laneId, zero);
- Value *threadIdx = getLinearThreadIndex(loc, rewriter);
- Value *blockSize = getBlockSize(loc, rewriter);
- Value *activeWidth = getActiveWidth(loc, threadIdx, blockSize, rewriter);
+ ValuePtr threadIdx = getLinearThreadIndex(loc, rewriter);
+ ValuePtr blockSize = getBlockSize(loc, rewriter);
+ ValuePtr activeWidth = getActiveWidth(loc, threadIdx, blockSize, rewriter);
// Reduce elements within each warp to produce the intermediate results.
- Value *warpReduce = createWarpReduce(loc, activeWidth, laneId, operand,
- accumFactory, rewriter);
+ ValuePtr warpReduce = createWarpReduce(loc, activeWidth, laneId, operand,
+ accumFactory, rewriter);
// Write the intermediate results to shared memory, using the first lane of
// each warp.
createPredicatedBlock(loc, rewriter, isFirstLane, [&] {
- Value *warpId = getDivideByWarpSize(threadIdx, rewriter);
- Value *storeDst = rewriter.create<LLVM::GEPOp>(
- loc, type, sharedMemPtr, ArrayRef<Value *>({zero, warpId}));
+ ValuePtr warpId = getDivideByWarpSize(threadIdx, rewriter);
+ ValuePtr storeDst = rewriter.create<LLVM::GEPOp>(
+ loc, type, sharedMemPtr, ArrayRef<ValuePtr>({zero, warpId}));
rewriter.create<LLVM::StoreOp>(loc, warpReduce, storeDst);
});
rewriter.create<NVVM::Barrier0Op>(loc);
- Value *numWarps = getNumWarps(loc, blockSize, rewriter);
- Value *isValidWarp = rewriter.create<LLVM::ICmpOp>(
+ ValuePtr numWarps = getNumWarps(loc, blockSize, rewriter);
+ ValuePtr isValidWarp = rewriter.create<LLVM::ICmpOp>(
loc, LLVM::ICmpPredicate::slt, threadIdx, numWarps);
- Value *resultPtr = rewriter.create<LLVM::GEPOp>(
- loc, type, sharedMemPtr, ArrayRef<Value *>({zero, zero}));
+ ValuePtr resultPtr = rewriter.create<LLVM::GEPOp>(
+ loc, type, sharedMemPtr, ArrayRef<ValuePtr>({zero, zero}));
// Use the first numWarps threads to reduce the intermediate results from
// shared memory. The final result is written to shared memory again.
createPredicatedBlock(loc, rewriter, isValidWarp, [&] {
- Value *loadSrc = rewriter.create<LLVM::GEPOp>(
- loc, type, sharedMemPtr, ArrayRef<Value *>({zero, threadIdx}));
- Value *value = rewriter.create<LLVM::LoadOp>(loc, type, loadSrc);
- Value *result = createWarpReduce(loc, numWarps, laneId, value,
- accumFactory, rewriter);
+ ValuePtr loadSrc = rewriter.create<LLVM::GEPOp>(
+ loc, type, sharedMemPtr, ArrayRef<ValuePtr>({zero, threadIdx}));
+ ValuePtr value = rewriter.create<LLVM::LoadOp>(loc, type, loadSrc);
+ ValuePtr result = createWarpReduce(loc, numWarps, laneId, value,
+ accumFactory, rewriter);
rewriter.create<LLVM::StoreOp>(loc, result, resultPtr);
});
rewriter.create<NVVM::Barrier0Op>(loc);
// Load and return result from shared memory.
- Value *result = rewriter.create<LLVM::LoadOp>(loc, type, resultPtr);
+ ValuePtr result = rewriter.create<LLVM::LoadOp>(loc, type, resultPtr);
return result;
}
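
The pseudocode above boils down to a two-level reduction: lanes combine within their warp, the first lane of each warp parks the partial result in shared memory, and the first numWarps threads then reduce those partials. A sequential sketch of that dataflow over a plain array (illustration only; it omits the shuffle, predication, and barrier details):

  #include <algorithm>
  #include <functional>
  #include <vector>

  int reduceBlock(const std::vector<int> &lanes,           // one value per thread
                  const std::function<int(int, int)> &accum) {
    constexpr size_t kWarpSize = 32;
    std::vector<int> shared;                               // one slot per warp
    for (size_t base = 0; base < lanes.size(); base += kWarpSize) {
      size_t end = std::min(lanes.size(), base + kWarpSize);
      int warp = lanes[base];                              // warp-level reduction
      for (size_t i = base + 1; i < end; ++i)
        warp = accum(warp, lanes[i]);
      shared.push_back(warp);                              // first lane's store
    }
    int result = shared.front();                           // assumes a non-empty block
    for (size_t i = 1; i < shared.size(); ++i)             // reduce the partials
      result = accum(result, shared[i]);
    return result;
  }
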
@@ -274,7 +274,7 @@ private:
///
template <typename ThenOpsFactory, typename ElseOpsFactory>
void createIf(Location loc, ConversionPatternRewriter &rewriter,
- Value *condition, ThenOpsFactory &&thenOpsFactory,
+ ValuePtr condition, ThenOpsFactory &&thenOpsFactory,
ElseOpsFactory &&elseOpsFactory) const {
Block *currentBlock = rewriter.getInsertionBlock();
auto currentPoint = rewriter.getInsertionPoint();
@@ -288,7 +288,7 @@ private:
ArrayRef<Block *>{thenBlock, elseBlock});
auto addBranch = [&](ValueRange operands) {
- rewriter.create<LLVM::BrOp>(loc, ArrayRef<Value *>{},
+ rewriter.create<LLVM::BrOp>(loc, ArrayRef<ValuePtr>{},
llvm::makeArrayRef(continueBlock),
llvm::makeArrayRef(operands));
};
@@ -303,32 +303,32 @@ private:
assert(thenOperands.size() == elseOperands.size());
rewriter.setInsertionPointToStart(continueBlock);
- for (auto *operand : thenOperands)
+ for (auto operand : thenOperands)
continueBlock->addArgument(operand->getType());
}
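
Conceptually, createIf splits the current block, builds a then/else diamond that both branch to a shared continue block, and turns the factories' returned values into the continue block's arguments. As a host-side analogy (assumed names, not part of the patch), it is the IR equivalent of a value-producing conditional:

// The continue block's arguments play the role of the conditional's results.
template <typename ThenFn, typename ElseFn>
auto createIfLike(bool condition, ThenFn thenOps, ElseFn elseOps) {
  return condition ? thenOps() : elseOps();  // both factories must yield matching types/arity
}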
/// Shortcut for createIf with empty else block and no block operands.
template <typename Factory>
void createPredicatedBlock(Location loc, ConversionPatternRewriter &rewriter,
- Value *condition,
+ ValuePtr condition,
Factory &&predicatedOpsFactory) const {
createIf(
loc, rewriter, condition,
[&] {
predicatedOpsFactory();
- return ArrayRef<Value *>();
+ return ArrayRef<ValuePtr>();
},
- [&] { return ArrayRef<Value *>(); });
+ [&] { return ArrayRef<ValuePtr>(); });
}
/// Creates a reduction across the first activeWidth lanes of a warp.
/// The first lane returns the result; the return values of all other lanes are undefined.
- Value *createWarpReduce(Location loc, Value *activeWidth, Value *laneId,
- Value *operand, AccumulatorFactory accumFactory,
- ConversionPatternRewriter &rewriter) const {
- Value *warpSize = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr createWarpReduce(Location loc, ValuePtr activeWidth, ValuePtr laneId,
+ ValuePtr operand, AccumulatorFactory accumFactory,
+ ConversionPatternRewriter &rewriter) const {
+ ValuePtr warpSize = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize));
- Value *isPartialWarp = rewriter.create<LLVM::ICmpOp>(
+ ValuePtr isPartialWarp = rewriter.create<LLVM::ICmpOp>(
loc, LLVM::ICmpPredicate::slt, activeWidth, warpSize);
auto type = operand->getType().cast<LLVM::LLVMType>();
@@ -336,16 +336,16 @@ private:
loc, rewriter, isPartialWarp,
// Generate reduction over a (potentially) partial warp.
[&] {
- Value *value = operand;
- Value *one = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr value = operand;
+ ValuePtr one = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(1));
// Bit mask of active lanes: `(1 << activeWidth) - 1`.
- Value *activeMask = rewriter.create<LLVM::SubOp>(
+ ValuePtr activeMask = rewriter.create<LLVM::SubOp>(
loc, int32Type,
rewriter.create<LLVM::ShlOp>(loc, int32Type, one, activeWidth),
one);
// Clamp lane: `activeWidth - 1`
- Value *maskAndClamp =
+ ValuePtr maskAndClamp =
rewriter.create<LLVM::SubOp>(loc, int32Type, activeWidth, one);
auto dialect = lowering.getDialect();
auto predTy = LLVM::LLVMType::getInt1Ty(dialect);
@@ -356,53 +356,53 @@ private:
// lane is within the active range. All lanes contain the final
// result, but only the first lane's result is used.
for (int i = 1; i < kWarpSize; i <<= 1) {
- Value *offset = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr offset = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(i));
- Value *shfl = rewriter.create<NVVM::ShflBflyOp>(
+ ValuePtr shfl = rewriter.create<NVVM::ShflBflyOp>(
loc, shflTy, activeMask, value, offset, maskAndClamp,
returnValueAndIsValidAttr);
- Value *isActiveSrcLane = rewriter.create<LLVM::ExtractValueOp>(
+ ValuePtr isActiveSrcLane = rewriter.create<LLVM::ExtractValueOp>(
loc, predTy, shfl, rewriter.getIndexArrayAttr(1));
// Skip the accumulation if the shuffle op read from a lane outside
// of the active range.
createIf(
loc, rewriter, isActiveSrcLane,
[&] {
- Value *shflValue = rewriter.create<LLVM::ExtractValueOp>(
+ ValuePtr shflValue = rewriter.create<LLVM::ExtractValueOp>(
loc, type, shfl, rewriter.getIndexArrayAttr(0));
- return SmallVector<Value *, 1>{
+ return SmallVector<ValuePtr, 1>{
accumFactory(loc, value, shflValue, rewriter)};
},
[&] { return llvm::makeArrayRef(value); });
value = rewriter.getInsertionBlock()->getArgument(0);
}
- return SmallVector<Value *, 1>{value};
+ return SmallVector<ValuePtr, 1>{value};
},
// Generate a reduction over the entire warp. This is a specialization
// of the above reduction with unconditional accumulation.
[&] {
- Value *value = operand;
- Value *activeMask = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr value = operand;
+ ValuePtr activeMask = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(~0u));
- Value *maskAndClamp = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr maskAndClamp = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize - 1));
for (int i = 1; i < kWarpSize; i <<= 1) {
- Value *offset = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr offset = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(i));
- Value *shflValue = rewriter.create<NVVM::ShflBflyOp>(
+ ValuePtr shflValue = rewriter.create<NVVM::ShflBflyOp>(
loc, type, activeMask, value, offset, maskAndClamp,
/*return_value_and_is_valid=*/UnitAttr());
value = accumFactory(loc, value, shflValue, rewriter);
}
- return SmallVector<Value *, 1>{value};
+ return SmallVector<ValuePtr, 1>{value};
});
return rewriter.getInsertionBlock()->getArgument(0);
}
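
The shuffle loop above walks XOR offsets 1, 2, 4, 8, 16, so after log2(32) exchanges every lane of a full warp holds the reduction. The partial-warp branch additionally builds activeMask = (1 << activeWidth) - 1, clamps with activeWidth - 1, and consults the shuffle's validity bit before accumulating. A minimal CUDA sketch of the full-warp case only, with addition standing in for accumFactory:

// Full-warp specialization; see the note above for what the partial-warp path
// adds. Illustrative only, not the generated NVVM ops.
__device__ float warpReduceSum(float value) {
  for (int offset = 1; offset < 32; offset <<= 1)
    value += __shfl_xor_sync(~0u, value, offset);  // nvvm shfl.bfly with a full mask
  return value;                                    // all 32 lanes hold the same sum
}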
/// Creates a global array stored in shared memory.
- Value *createSharedMemoryArray(Location loc, ModuleOp module,
- LLVM::LLVMType elementType, int numElements,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr createSharedMemoryArray(Location loc, ModuleOp module,
+ LLVM::LLVMType elementType, int numElements,
+ ConversionPatternRewriter &rewriter) const {
OpBuilder builder(module.getBodyRegion());
auto arrayType = LLVM::LLVMType::getArrayTy(elementType, numElements);
@@ -416,31 +416,32 @@ private:
}
/// Returns the index of the thread within the block.
- Value *getLinearThreadIndex(Location loc,
- ConversionPatternRewriter &rewriter) const {
- Value *dimX = rewriter.create<NVVM::BlockDimXOp>(loc, int32Type);
- Value *dimY = rewriter.create<NVVM::BlockDimYOp>(loc, int32Type);
- Value *idX = rewriter.create<NVVM::ThreadIdXOp>(loc, int32Type);
- Value *idY = rewriter.create<NVVM::ThreadIdYOp>(loc, int32Type);
- Value *idZ = rewriter.create<NVVM::ThreadIdZOp>(loc, int32Type);
- Value *tmp1 = rewriter.create<LLVM::MulOp>(loc, int32Type, idZ, dimY);
- Value *tmp2 = rewriter.create<LLVM::AddOp>(loc, int32Type, tmp1, idY);
- Value *tmp3 = rewriter.create<LLVM::MulOp>(loc, int32Type, tmp2, dimX);
+ ValuePtr getLinearThreadIndex(Location loc,
+ ConversionPatternRewriter &rewriter) const {
+ ValuePtr dimX = rewriter.create<NVVM::BlockDimXOp>(loc, int32Type);
+ ValuePtr dimY = rewriter.create<NVVM::BlockDimYOp>(loc, int32Type);
+ ValuePtr idX = rewriter.create<NVVM::ThreadIdXOp>(loc, int32Type);
+ ValuePtr idY = rewriter.create<NVVM::ThreadIdYOp>(loc, int32Type);
+ ValuePtr idZ = rewriter.create<NVVM::ThreadIdZOp>(loc, int32Type);
+ ValuePtr tmp1 = rewriter.create<LLVM::MulOp>(loc, int32Type, idZ, dimY);
+ ValuePtr tmp2 = rewriter.create<LLVM::AddOp>(loc, int32Type, tmp1, idY);
+ ValuePtr tmp3 = rewriter.create<LLVM::MulOp>(loc, int32Type, tmp2, dimX);
return rewriter.create<LLVM::AddOp>(loc, int32Type, tmp3, idX);
}
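
The mul/add chain above is the usual row-major linearization of the 3-D thread id, with x varying fastest. A quick scalar check:

// linearTid = (tid.z * dim.y + tid.y) * dim.x + tid.x
// e.g. blockDim = (8, 4, 2), threadIdx = (3, 1, 1): (1 * 4 + 1) * 8 + 3 = 43
int linearThreadIndex(int tx, int ty, int tz, int dx, int dy) {
  return (tz * dy + ty) * dx + tx;
}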
/// Returns the number of threads in the block.
- Value *getBlockSize(Location loc, ConversionPatternRewriter &rewriter) const {
- Value *dimX = rewriter.create<NVVM::BlockDimXOp>(loc, int32Type);
- Value *dimY = rewriter.create<NVVM::BlockDimYOp>(loc, int32Type);
- Value *dimZ = rewriter.create<NVVM::BlockDimZOp>(loc, int32Type);
- Value *dimXY = rewriter.create<LLVM::MulOp>(loc, int32Type, dimX, dimY);
+ ValuePtr getBlockSize(Location loc,
+ ConversionPatternRewriter &rewriter) const {
+ ValuePtr dimX = rewriter.create<NVVM::BlockDimXOp>(loc, int32Type);
+ ValuePtr dimY = rewriter.create<NVVM::BlockDimYOp>(loc, int32Type);
+ ValuePtr dimZ = rewriter.create<NVVM::BlockDimZOp>(loc, int32Type);
+ ValuePtr dimXY = rewriter.create<LLVM::MulOp>(loc, int32Type, dimX, dimY);
return rewriter.create<LLVM::MulOp>(loc, int32Type, dimXY, dimZ);
}
/// Returns the number of warps in the block.
- Value *getNumWarps(Location loc, Value *blockSize,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr getNumWarps(Location loc, ValuePtr blockSize,
+ ConversionPatternRewriter &rewriter) const {
auto warpSizeMinusOne = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize - 1));
auto biasedBlockSize = rewriter.create<LLVM::AddOp>(
@@ -449,19 +450,19 @@ private:
}
/// Returns the number of active threads in the warp, not clamped to 32.
- Value *getActiveWidth(Location loc, Value *threadIdx, Value *blockSize,
- ConversionPatternRewriter &rewriter) const {
- Value *threadIdxMask = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr getActiveWidth(Location loc, ValuePtr threadIdx, ValuePtr blockSize,
+ ConversionPatternRewriter &rewriter) const {
+ ValuePtr threadIdxMask = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(~(kWarpSize - 1)));
- Value *numThreadsWithSmallerWarpId =
+ ValuePtr numThreadsWithSmallerWarpId =
rewriter.create<LLVM::AndOp>(loc, threadIdx, threadIdxMask);
return rewriter.create<LLVM::SubOp>(loc, blockSize,
numThreadsWithSmallerWarpId);
}
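
Together these helpers do the warp bookkeeping: getNumWarps is a ceiling division by the warp size, and getActiveWidth subtracts the first linear index of the thread's warp from the block size, so it only drops below 32 for the last, partially filled warp. A worked example in plain C++ (kWarpSize assumed to be 32):

// blockSize = 70  -> numWarps = (70 + 31) / 32 = 3
// threadIdx = 69  -> warp start = 69 & ~31 = 64, activeWidth = 70 - 64 = 6
// threadIdx = 40  -> warp start = 32,            activeWidth = 70 - 32 = 38 (full warp)
int numWarps(int blockSize)              { return (blockSize + 31) / 32; }
int activeWidth(int tid, int blockSize)  { return blockSize - (tid & ~31); }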
/// Returns value divided by the warp size (i.e. 32).
- Value *getDivideByWarpSize(Value *value,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr getDivideByWarpSize(ValuePtr value,
+ ConversionPatternRewriter &rewriter) const {
auto loc = value->getLoc();
auto warpSize = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize));
@@ -495,7 +496,7 @@ struct GPUShuffleOpLowering : public LLVMOpLowering {
/// %shfl_pred = llvm.extractvalue %shfl[1 : index] :
/// !llvm<"{ float, i1 }">
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
Location loc = op->getLoc();
gpu::ShuffleOpOperandAdaptor adaptor(operands);
@@ -506,24 +507,24 @@ struct GPUShuffleOpLowering : public LLVMOpLowering {
auto predTy = LLVM::LLVMType::getInt1Ty(dialect);
auto resultTy = LLVM::LLVMType::getStructTy(dialect, {valueTy, predTy});
- Value *one = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr one = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(1));
// Bit mask of active lanes: `(1 << activeWidth) - 1`.
- Value *activeMask = rewriter.create<LLVM::SubOp>(
+ ValuePtr activeMask = rewriter.create<LLVM::SubOp>(
loc, int32Type,
rewriter.create<LLVM::ShlOp>(loc, int32Type, one, adaptor.width()),
one);
// Clamp lane: `activeWidth - 1`
- Value *maskAndClamp =
+ ValuePtr maskAndClamp =
rewriter.create<LLVM::SubOp>(loc, int32Type, adaptor.width(), one);
auto returnValueAndIsValidAttr = rewriter.getUnitAttr();
- Value *shfl = rewriter.create<NVVM::ShflBflyOp>(
+ ValuePtr shfl = rewriter.create<NVVM::ShflBflyOp>(
loc, resultTy, activeMask, adaptor.value(), adaptor.offset(),
maskAndClamp, returnValueAndIsValidAttr);
- Value *shflValue = rewriter.create<LLVM::ExtractValueOp>(
+ ValuePtr shflValue = rewriter.create<LLVM::ExtractValueOp>(
loc, valueTy, shfl, rewriter.getIndexArrayAttr(0));
- Value *isActiveSrcLane = rewriter.create<LLVM::ExtractValueOp>(
+ ValuePtr isActiveSrcLane = rewriter.create<LLVM::ExtractValueOp>(
loc, predTy, shfl, rewriter.getIndexArrayAttr(1));
rewriter.replaceOp(op, {shflValue, isActiveSrcLane});
@@ -538,7 +539,7 @@ struct GPUFuncOpLowering : LLVMOpLowering {
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
assert(operands.empty() && "func op is not expected to have operands");
auto gpuFuncOp = cast<gpu::GPUFuncOp>(op);
@@ -547,7 +548,7 @@ struct GPUFuncOpLowering : LLVMOpLowering {
SmallVector<LLVM::GlobalOp, 3> workgroupBuffers;
workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions());
for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
- Value *attribution = en.value();
+ ValuePtr attribution = en.value();
auto type = attribution->getType().dyn_cast<MemRefType>();
assert(type && type.hasStaticShape() && "unexpected type in attribution");
@@ -604,23 +605,23 @@ struct GPUFuncOpLowering : LLVMOpLowering {
unsigned numProperArguments = gpuFuncOp.getNumArguments();
auto i32Type = LLVM::LLVMType::getInt32Ty(lowering.getDialect());
- Value *zero = nullptr;
+ ValuePtr zero = nullptr;
if (!workgroupBuffers.empty())
zero = rewriter.create<LLVM::ConstantOp>(loc, i32Type,
rewriter.getI32IntegerAttr(0));
for (auto en : llvm::enumerate(workgroupBuffers)) {
LLVM::GlobalOp global = en.value();
- Value *address = rewriter.create<LLVM::AddressOfOp>(loc, global);
+ ValuePtr address = rewriter.create<LLVM::AddressOfOp>(loc, global);
auto elementType = global.getType().getArrayElementType();
- Value *memory = rewriter.create<LLVM::GEPOp>(
+ ValuePtr memory = rewriter.create<LLVM::GEPOp>(
loc, elementType.getPointerTo(global.addr_space().getZExtValue()),
- address, ArrayRef<Value *>{zero, zero});
+ address, ArrayRef<ValuePtr>{zero, zero});
// Build a memref descriptor pointing to the buffer to plug with the
// existing memref infrastructure. This may use more registers than
// otherwise necessary given that memref sizes are fixed, but we can try
// and canonicalize that away later.
- Value *attribution = gpuFuncOp.getWorkgroupAttributions()[en.index()];
+ ValuePtr attribution = gpuFuncOp.getWorkgroupAttributions()[en.index()];
auto type = attribution->getType().cast<MemRefType>();
auto descr = MemRefDescriptor::fromStaticShape(rewriter, loc, lowering,
type, memory);
@@ -632,7 +633,7 @@ struct GPUFuncOpLowering : LLVMOpLowering {
gpuFuncOp.getNumWorkgroupAttributions();
auto int64Ty = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
- Value *attribution = en.value();
+ ValuePtr attribution = en.value();
auto type = attribution->getType().cast<MemRefType>();
assert(type && type.hasStaticShape() &&
"unexpected type in attribution");
@@ -643,10 +644,10 @@ struct GPUFuncOpLowering : LLVMOpLowering {
auto ptrType = lowering.convertType(type.getElementType())
.cast<LLVM::LLVMType>()
.getPointerTo();
- Value *numElements = rewriter.create<LLVM::ConstantOp>(
+ ValuePtr numElements = rewriter.create<LLVM::ConstantOp>(
gpuFuncOp.getLoc(), int64Ty,
rewriter.getI64IntegerAttr(type.getNumElements()));
- Value *allocated = rewriter.create<LLVM::AllocaOp>(
+ ValuePtr allocated = rewriter.create<LLVM::AllocaOp>(
gpuFuncOp.getLoc(), ptrType, numElements, /*alignment=*/0);
auto descr = MemRefDescriptor::fromStaticShape(rewriter, loc, lowering,
type, allocated);
@@ -674,8 +675,8 @@ struct GPUFuncOpLowering : LLVMOpLowering {
!en.value().isa<UnrankedMemRefType>())
continue;
- BlockArgument *arg = block.getArgument(en.index());
- Value *loaded = rewriter.create<LLVM::LoadOp>(loc, arg);
+ BlockArgumentPtr arg = block.getArgument(en.index());
+ ValuePtr loaded = rewriter.create<LLVM::LoadOp>(loc, arg);
rewriter.replaceUsesOfBlockArgument(arg, loaded);
}
}
@@ -692,7 +693,7 @@ struct GPUReturnOpLowering : public LLVMOpLowering {
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, operands,
ArrayRef<Block *>());
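
Throughout these hunks the mechanical change is Value * to ValuePtr (and BlockArgument * to BlockArgumentPtr) in signatures, locals, and container element types. The typedefs themselves are not shown in this diff; at this stage of the transition they are presumably just transitional aliases, roughly:

// Assumed shape of the transitional aliases (so call sites can migrate before
// Value becomes a value type); not copied from this patch.
using ValuePtr = Value *;
using BlockArgumentPtr = BlockArgument *;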
diff --git a/mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp
index 42483a6e5df..0c34fc2b8e1 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp
@@ -36,7 +36,7 @@ public:
using SPIRVOpLowering<loop::ForOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(loop::ForOp forOp, ArrayRef<Value *> operands,
+ matchAndRewrite(loop::ForOp forOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -48,7 +48,7 @@ public:
using SPIRVOpLowering<SourceOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(SourceOp op, ArrayRef<Value *> operands,
+ matchAndRewrite(SourceOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -65,7 +65,7 @@ public:
}
PatternMatchResult
- matchAndRewrite(gpu::GPUFuncOp funcOp, ArrayRef<Value *> operands,
+ matchAndRewrite(gpu::GPUFuncOp funcOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
private:
@@ -79,7 +79,7 @@ public:
using SPIRVOpLowering<ModuleOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(ModuleOp moduleOp, ArrayRef<Value *> operands,
+ matchAndRewrite(ModuleOp moduleOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -92,7 +92,7 @@ public:
using SPIRVOpLowering<ModuleTerminatorOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(ModuleTerminatorOp terminatorOp, ArrayRef<Value *> operands,
+ matchAndRewrite(ModuleTerminatorOp terminatorOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -103,7 +103,7 @@ public:
using SPIRVOpLowering<gpu::ReturnOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(gpu::ReturnOp returnOp, ArrayRef<Value *> operands,
+ matchAndRewrite(gpu::ReturnOp returnOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -114,7 +114,7 @@ public:
//===----------------------------------------------------------------------===//
PatternMatchResult
-ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value *> operands,
+ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
// loop::ForOp can be lowered to the structured control flow represented by
// spirv::LoopOp by making the continue block of the spirv::LoopOp the loop
@@ -135,7 +135,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value *> operands,
loopOp.body().getBlocks().insert(std::next(loopOp.body().begin(), 1), header);
// Create the new induction variable to use.
- BlockArgument *newIndVar =
+ BlockArgumentPtr newIndVar =
header->addArgument(forOperands.lowerBound()->getType());
Block *body = forOp.getBody();
@@ -166,7 +166,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value *> operands,
auto cmpOp = rewriter.create<spirv::SLessThanOp>(
loc, rewriter.getI1Type(), newIndVar, forOperands.upperBound());
rewriter.create<spirv::BranchConditionalOp>(
- loc, cmpOp, body, ArrayRef<Value *>(), mergeBlock, ArrayRef<Value *>());
+ loc, cmpOp, body, ArrayRef<ValuePtr>(), mergeBlock, ArrayRef<ValuePtr>());
// Generate instructions to increment the step of the induction variable and
// branch to the header.
@@ -174,7 +174,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value *> operands,
rewriter.setInsertionPointToEnd(continueBlock);
// Add the step to the induction variable and branch to the header.
- Value *updatedIndVar = rewriter.create<spirv::IAddOp>(
+ ValuePtr updatedIndVar = rewriter.create<spirv::IAddOp>(
loc, newIndVar->getType(), newIndVar, forOperands.step());
rewriter.create<spirv::BranchOp>(loc, header, updatedIndVar);
@@ -188,7 +188,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value *> operands,
template <typename SourceOp, spirv::BuiltIn builtin>
PatternMatchResult LaunchConfigConversion<SourceOp, builtin>::matchAndRewrite(
- SourceOp op, ArrayRef<Value *> operands,
+ SourceOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
auto dimAttr =
op.getOperation()->template getAttrOfType<StringAttr>("dimension");
@@ -267,7 +267,7 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, SPIRVTypeConverter &typeConverter,
PatternMatchResult
KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!gpu::GPUDialect::isKernel(funcOp)) {
return matchFailure();
@@ -297,7 +297,7 @@ KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp,
//===----------------------------------------------------------------------===//
PatternMatchResult KernelModuleConversion::matchAndRewrite(
- ModuleOp moduleOp, ArrayRef<Value *> operands,
+ ModuleOp moduleOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!moduleOp.getAttrOfType<UnitAttr>(
gpu::GPUDialect::getKernelModuleAttrName())) {
@@ -327,7 +327,7 @@ PatternMatchResult KernelModuleConversion::matchAndRewrite(
//===----------------------------------------------------------------------===//
PatternMatchResult KernelModuleTerminatorConversion::matchAndRewrite(
- ModuleTerminatorOp terminatorOp, ArrayRef<Value *> operands,
+ ModuleTerminatorOp terminatorOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
rewriter.replaceOpWithNewOp<spirv::ModuleEndOp>(terminatorOp);
return matchSuccess();
@@ -338,7 +338,7 @@ PatternMatchResult KernelModuleTerminatorConversion::matchAndRewrite(
//===----------------------------------------------------------------------===//
PatternMatchResult GPUReturnOpConversion::matchAndRewrite(
- gpu::ReturnOp returnOp, ArrayRef<Value *> operands,
+ gpu::ReturnOp returnOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!operands.empty())
return matchFailure();
diff --git a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
index 3eb23c19dc7..8b6b9fb7930 100644
--- a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
+++ b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
@@ -120,21 +120,23 @@ public:
BaseViewConversionHelper(Type type)
: d(MemRefDescriptor::undef(rewriter(), loc(), type)) {}
- BaseViewConversionHelper(Value *v) : d(v) {}
+ BaseViewConversionHelper(ValuePtr v) : d(v) {}
/// Wrappers around MemRefDescriptor that use EDSC builder and location.
- Value *allocatedPtr() { return d.allocatedPtr(rewriter(), loc()); }
- void setAllocatedPtr(Value *v) { d.setAllocatedPtr(rewriter(), loc(), v); }
- Value *alignedPtr() { return d.alignedPtr(rewriter(), loc()); }
- void setAlignedPtr(Value *v) { d.setAlignedPtr(rewriter(), loc(), v); }
- Value *offset() { return d.offset(rewriter(), loc()); }
- void setOffset(Value *v) { d.setOffset(rewriter(), loc(), v); }
- Value *size(unsigned i) { return d.size(rewriter(), loc(), i); }
- void setSize(unsigned i, Value *v) { d.setSize(rewriter(), loc(), i, v); }
- Value *stride(unsigned i) { return d.stride(rewriter(), loc(), i); }
- void setStride(unsigned i, Value *v) { d.setStride(rewriter(), loc(), i, v); }
-
- operator Value *() { return d; }
+ ValuePtr allocatedPtr() { return d.allocatedPtr(rewriter(), loc()); }
+ void setAllocatedPtr(ValuePtr v) { d.setAllocatedPtr(rewriter(), loc(), v); }
+ ValuePtr alignedPtr() { return d.alignedPtr(rewriter(), loc()); }
+ void setAlignedPtr(ValuePtr v) { d.setAlignedPtr(rewriter(), loc(), v); }
+ ValuePtr offset() { return d.offset(rewriter(), loc()); }
+ void setOffset(ValuePtr v) { d.setOffset(rewriter(), loc(), v); }
+ ValuePtr size(unsigned i) { return d.size(rewriter(), loc(), i); }
+ void setSize(unsigned i, ValuePtr v) { d.setSize(rewriter(), loc(), i, v); }
+ ValuePtr stride(unsigned i) { return d.stride(rewriter(), loc(), i); }
+ void setStride(unsigned i, ValuePtr v) {
+ d.setStride(rewriter(), loc(), i, v);
+ }
+
+ operator ValuePtr() { return d; }
private:
OpBuilder &rewriter() { return ScopedContext::getBuilder(); }
@@ -151,7 +153,7 @@ public:
: LLVMOpLowering(RangeOp::getOperationName(), context, lowering_) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto rangeOp = cast<RangeOp>(op);
auto rangeDescriptorTy =
@@ -161,7 +163,7 @@ public:
// Fill in an aggregate value of the descriptor.
RangeOpOperandAdaptor adaptor(operands);
- Value *desc = llvm_undef(rangeDescriptorTy);
+ ValuePtr desc = llvm_undef(rangeDescriptorTy);
desc = insertvalue(desc, adaptor.min(), rewriter.getI64ArrayAttr(0));
desc = insertvalue(desc, adaptor.max(), rewriter.getI64ArrayAttr(1));
desc = insertvalue(desc, adaptor.step(), rewriter.getI64ArrayAttr(2));
@@ -184,7 +186,7 @@ public:
: LLVMOpLowering(SliceOp::getOperationName(), context, lowering_) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
edsc::ScopedContext context(rewriter, op->getLoc());
SliceOpOperandAdaptor adaptor(operands);
@@ -198,7 +200,7 @@ public:
BaseViewConversionHelper desc(lowering.convertType(sliceOp.getViewType()));
// TODO(ntv): extract sizes and emit asserts.
- SmallVector<Value *, 4> strides(memRefType.getRank());
+ SmallVector<ValuePtr, 4> strides(memRefType.getRank());
for (int i = 0, e = memRefType.getRank(); i < e; ++i)
strides[i] = baseDesc.stride(i);
@@ -207,10 +209,10 @@ public:
};
// Compute base offset.
- Value *baseOffset = baseDesc.offset();
+ ValuePtr baseOffset = baseDesc.offset();
for (int i = 0, e = memRefType.getRank(); i < e; ++i) {
- Value *indexing = adaptor.indexings()[i];
- Value *min = indexing;
+ ValuePtr indexing = adaptor.indexings()[i];
+ ValuePtr min = indexing;
if (sliceOp.indexing(i)->getType().isa<RangeType>())
min = extractvalue(int64Ty, indexing, pos(0));
baseOffset = add(baseOffset, mul(min, strides[i]));
@@ -227,29 +229,29 @@ public:
if (sliceOp.getViewType().getRank() == 0)
return rewriter.replaceOp(op, {desc}), matchSuccess();
- Value *zero =
+ ValuePtr zero =
constant(int64Ty, rewriter.getIntegerAttr(rewriter.getIndexType(), 0));
// Compute and insert view sizes (max - min along the range) and strides.
// Skip the non-range operands as they will be projected away from the view.
int numNewDims = 0;
for (auto en : llvm::enumerate(sliceOp.indexings())) {
- Value *indexing = en.value();
+ ValuePtr indexing = en.value();
if (indexing->getType().isa<RangeType>()) {
int rank = en.index();
- Value *rangeDescriptor = adaptor.indexings()[rank];
- Value *min = extractvalue(int64Ty, rangeDescriptor, pos(0));
- Value *max = extractvalue(int64Ty, rangeDescriptor, pos(1));
- Value *step = extractvalue(int64Ty, rangeDescriptor, pos(2));
- Value *baseSize = baseDesc.size(rank);
+ ValuePtr rangeDescriptor = adaptor.indexings()[rank];
+ ValuePtr min = extractvalue(int64Ty, rangeDescriptor, pos(0));
+ ValuePtr max = extractvalue(int64Ty, rangeDescriptor, pos(1));
+ ValuePtr step = extractvalue(int64Ty, rangeDescriptor, pos(2));
+ ValuePtr baseSize = baseDesc.size(rank);
// Bound upper by base view upper bound.
max = llvm_select(llvm_icmp(ICmpPredicate::slt, max, baseSize), max,
baseSize);
- Value *size = sub(max, min);
+ ValuePtr size = sub(max, min);
// Bound lower by zero.
size =
llvm_select(llvm_icmp(ICmpPredicate::slt, size, zero), zero, size);
- Value *stride = mul(strides[rank], step);
+ ValuePtr stride = mul(strides[rank], step);
desc.setSize(numNewDims, size);
desc.setStride(numNewDims, stride);
++numNewDims;
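
In scalar terms, the slice hunks above fold each range indexing [min, max, step] into the new descriptor as offset' = offset + sum_i(min_i * stride_i), size' = clamp(min(max, baseSize) - min, 0), and stride' = baseStride * step. A plain C++ sketch with hypothetical helper names:

#include <algorithm>
#include <cstdint>
#include <vector>

struct SliceDim { int64_t min, max, step; };

int64_t sliceOffset(int64_t baseOffset, const std::vector<int64_t> &mins,
                    const std::vector<int64_t> &strides) {
  for (size_t i = 0; i < mins.size(); ++i)
    baseOffset += mins[i] * strides[i];        // baseOffset + sum_i(min_i * stride_i)
  return baseOffset;
}

int64_t sliceSize(SliceDim d, int64_t baseSize) {
  int64_t upper = std::min(d.max, baseSize);   // bound above by the base view
  return std::max<int64_t>(upper - d.min, 0);  // bound below by zero
}

int64_t sliceStride(SliceDim d, int64_t baseStride) { return baseStride * d.step; }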
@@ -275,7 +277,7 @@ public:
: LLVMOpLowering(TransposeOp::getOperationName(), context, lowering_) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
// Initialize the common boilerplate and alloca at the top of the FuncOp.
edsc::ScopedContext context(rewriter, op->getLoc());
@@ -318,7 +320,7 @@ public:
: LLVMOpLowering(YieldOp::getOperationName(), context, lowering_) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, operands);
return matchSuccess();
@@ -453,7 +455,7 @@ public:
op.getLoc(), rewriter.getIntegerAttr(rewriter.getIndexType(), 0));
auto indexedGenericOp = cast<IndexedGenericOp>(op);
auto numLoops = indexedGenericOp.getNumLoops();
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
operands.reserve(numLoops + op.getNumOperands());
for (unsigned i = 0; i < numLoops; ++i) {
operands.push_back(zero);
@@ -477,7 +479,7 @@ public:
PatternMatchResult matchAndRewrite(CopyOp op,
PatternRewriter &rewriter) const override {
- Value *in = op.input(), *out = op.output();
+ ValuePtr in = op.input(), out = op.output();
// If either inputPerm or outputPerm are non-identities, insert transposes.
auto inputPerm = op.inputPermutation();
diff --git a/mlir/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp b/mlir/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp
index ff93ce58fd4..d8df7487e71 100644
--- a/mlir/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp
+++ b/mlir/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp
@@ -182,22 +182,22 @@ ForLowering::matchAndRewrite(ForOp forOp, PatternRewriter &rewriter) const {
rewriter.splitBlock(conditionBlock, conditionBlock->begin());
auto *lastBodyBlock = &forOp.region().back();
rewriter.inlineRegionBefore(forOp.region(), endBlock);
- auto *iv = conditionBlock->getArgument(0);
+ auto iv = conditionBlock->getArgument(0);
// Append the induction variable stepping logic to the last body block and
// branch back to the condition block. Construct an expression f :
// (x -> x+step) and apply this expression to the induction variable.
rewriter.setInsertionPointToEnd(lastBodyBlock);
- auto *step = forOp.step();
- auto *stepped = rewriter.create<AddIOp>(loc, iv, step).getResult();
+ auto step = forOp.step();
+ auto stepped = rewriter.create<AddIOp>(loc, iv, step).getResult();
if (!stepped)
return matchFailure();
rewriter.create<BranchOp>(loc, conditionBlock, stepped);
// Compute loop bounds before branching to the condition.
rewriter.setInsertionPointToEnd(initBlock);
- Value *lowerBound = forOp.lowerBound();
- Value *upperBound = forOp.upperBound();
+ ValuePtr lowerBound = forOp.lowerBound();
+ ValuePtr upperBound = forOp.upperBound();
if (!lowerBound || !upperBound)
return matchFailure();
rewriter.create<BranchOp>(loc, conditionBlock, lowerBound);
@@ -208,8 +208,8 @@ ForLowering::matchAndRewrite(ForOp forOp, PatternRewriter &rewriter) const {
rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, iv, upperBound);
rewriter.create<CondBranchOp>(loc, comparison, firstBodyBlock,
- ArrayRef<Value *>(), endBlock,
- ArrayRef<Value *>());
+ ArrayRef<ValuePtr>(), endBlock,
+ ArrayRef<ValuePtr>());
// Ok, we're done!
rewriter.eraseOp(forOp);
return matchSuccess();
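
Written back as structured C++ for comparison, the CFG that ForLowering emits corresponds to the loop below; `body` stands for the inlined loop region.

void body(int iv);

void loweredFor(int lowerBound, int upperBound, int step) {
  int iv = lowerBound;          // initBlock branches to conditionBlock carrying lb
  while (iv < upperBound) {     // conditionBlock: cmpi slt + cond_br
    body(iv);                   // firstBodyBlock ... lastBodyBlock
    iv += step;                 // appended stepping logic, br back to conditionBlock
  }
                                // endBlock: control continues after the loop
}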
@@ -248,8 +248,8 @@ IfLowering::matchAndRewrite(IfOp ifOp, PatternRewriter &rewriter) const {
rewriter.setInsertionPointToEnd(condBlock);
rewriter.create<CondBranchOp>(loc, ifOp.condition(), thenBlock,
- /*trueArgs=*/ArrayRef<Value *>(), elseBlock,
- /*falseArgs=*/ArrayRef<Value *>());
+ /*trueArgs=*/ArrayRef<ValuePtr>(), elseBlock,
+ /*falseArgs=*/ArrayRef<ValuePtr>());
// Ok, we're done!
rewriter.eraseOp(ifOp);
diff --git a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
index d663ae105f2..3cbce7caa76 100644
--- a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
+++ b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
@@ -43,7 +43,7 @@ using namespace mlir::loop;
using llvm::seq;
// Extract an indexed value from KernelDim3.
-static Value *getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
+static ValuePtr getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
switch (pos) {
case 0:
return dim3.x;
@@ -61,8 +61,8 @@ static Value *getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
return forOp.getLowerBoundOperands();
}
-static SmallVector<Value *, 1> getLowerBoundOperands(ForOp forOp) {
- SmallVector<Value *, 1> bounds(1, forOp.lowerBound());
+static SmallVector<ValuePtr, 1> getLowerBoundOperands(ForOp forOp) {
+ SmallVector<ValuePtr, 1> bounds(1, forOp.lowerBound());
return bounds;
}
@@ -70,33 +70,35 @@ static SmallVector<Value *, 1> getLowerBoundOperands(ForOp forOp) {
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
return forOp.getUpperBoundOperands();
}
-static SmallVector<Value *, 1> getUpperBoundOperands(ForOp forOp) {
- SmallVector<Value *, 1> bounds(1, forOp.upperBound());
+static SmallVector<ValuePtr, 1> getUpperBoundOperands(ForOp forOp) {
+ SmallVector<ValuePtr, 1> bounds(1, forOp.upperBound());
return bounds;
}
// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
-static Value *getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
+static ValuePtr getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
return builder.create<ConstantIndexOp>(forOp.getLoc(), forOp.getStep());
}
-static Value *getOrCreateStep(ForOp forOp, OpBuilder &) { return forOp.step(); }
+static ValuePtr getOrCreateStep(ForOp forOp, OpBuilder &) {
+ return forOp.step();
+}
// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
-static Value *getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
+static ValuePtr getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
return lowerAffineLowerBound(forOp, builder);
}
-static Value *getOrEmitLowerBound(ForOp forOp, OpBuilder &) {
+static ValuePtr getOrEmitLowerBound(ForOp forOp, OpBuilder &) {
return forOp.lowerBound();
}
// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
-static Value *getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
+static ValuePtr getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
return lowerAffineUpperBound(forOp, builder);
}
-static Value *getOrEmitUpperBound(ForOp forOp, OpBuilder &) {
+static ValuePtr getOrEmitUpperBound(ForOp forOp, OpBuilder &) {
return forOp.upperBound();
}
@@ -212,18 +214,18 @@ struct LoopToGpuConverter {
unsigned numThreadDims);
// Ranges of the loops mapped to blocks or threads.
- SmallVector<Value *, 6> dims;
+ SmallVector<ValuePtr, 6> dims;
// Lower bounds of the loops mapped to blocks or threads.
- SmallVector<Value *, 6> lbs;
+ SmallVector<ValuePtr, 6> lbs;
// Induction variables of the loops mapped to blocks or threads.
- SmallVector<Value *, 6> ivs;
+ SmallVector<ValuePtr, 6> ivs;
// Steps of the loops mapped to blocks or threads.
- SmallVector<Value *, 6> steps;
+ SmallVector<ValuePtr, 6> steps;
};
} // namespace
// Return true if the value is obviously a constant "one".
-static bool isConstantOne(Value *value) {
+static bool isConstantOne(ValuePtr value) {
if (auto def = dyn_cast_or_null<ConstantIndexOp>(value->getDefiningOp()))
return def.getValue() == 1;
return false;
@@ -244,15 +246,15 @@ Optional<OpTy> LoopToGpuConverter::collectBounds(OpTy forOp,
steps.reserve(numLoops);
OpTy currentLoop = forOp;
for (unsigned i = 0; i < numLoops; ++i) {
- Value *lowerBound = getOrEmitLowerBound(currentLoop, builder);
- Value *upperBound = getOrEmitUpperBound(currentLoop, builder);
+ ValuePtr lowerBound = getOrEmitLowerBound(currentLoop, builder);
+ ValuePtr upperBound = getOrEmitUpperBound(currentLoop, builder);
if (!lowerBound || !upperBound) {
return llvm::None;
}
- Value *range =
+ ValuePtr range =
builder.create<SubIOp>(currentLoop.getLoc(), upperBound, lowerBound);
- Value *step = getOrCreateStep(currentLoop, builder);
+ ValuePtr step = getOrCreateStep(currentLoop, builder);
if (!isConstantOne(step))
range = builder.create<SignedDivIOp>(currentLoop.getLoc(), range, step);
dims.push_back(range);
@@ -274,8 +276,8 @@ Optional<OpTy> LoopToGpuConverter::collectBounds(OpTy forOp,
/// `nids`. The innermost loop is mapped to the x-dimension, followed by the
/// next innermost loop to y-dimension, followed by z-dimension.
template <typename OpTy>
-OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef<Value *> ids,
- ArrayRef<Value *> nids) {
+OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef<ValuePtr> ids,
+ ArrayRef<ValuePtr> nids) {
auto nDims = ids.size();
assert(nDims == nids.size());
for (auto dim : llvm::seq<unsigned>(0, nDims)) {
@@ -295,11 +297,11 @@ OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef<Value *> ids,
/// each workgroup/workitem and number of workgroup/workitems along a dimension
/// of the launch into a container.
void packIdAndNumId(gpu::KernelDim3 kernelIds, gpu::KernelDim3 kernelNids,
- unsigned nDims, SmallVectorImpl<Value *> &ids,
- SmallVectorImpl<Value *> &nids) {
+ unsigned nDims, SmallVectorImpl<ValuePtr> &ids,
+ SmallVectorImpl<ValuePtr> &nids) {
assert(nDims <= 3 && "invalid number of launch dimensions");
- SmallVector<Value *, 3> allIds = {kernelIds.z, kernelIds.y, kernelIds.x};
- SmallVector<Value *, 3> allNids = {kernelNids.z, kernelNids.y, kernelNids.x};
+ SmallVector<ValuePtr, 3> allIds = {kernelIds.z, kernelIds.y, kernelIds.x};
+ SmallVector<ValuePtr, 3> allNids = {kernelNids.z, kernelNids.y, kernelNids.x};
ids.clear();
ids.append(std::next(allIds.begin(), allIds.size() - nDims), allIds.end());
nids.clear();
@@ -317,7 +319,7 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp,
auto returnOp = builder.create<gpu::ReturnOp>(launchOp.getLoc());
rootForOp.getOperation()->moveBefore(returnOp);
- SmallVector<Value *, 3> workgroupID, numWorkGroups;
+ SmallVector<ValuePtr, 3> workgroupID, numWorkGroups;
packIdAndNumId(launchOp.getBlockIds(), launchOp.getGridSize(), numBlockDims,
workgroupID, numWorkGroups);
@@ -333,7 +335,7 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp,
}
}
- SmallVector<Value *, 3> workItemID, workGroupSize;
+ SmallVector<ValuePtr, 3> workItemID, workGroupSize;
packIdAndNumId(launchOp.getThreadIds(), launchOp.getBlockSize(),
numThreadDims, workItemID, workGroupSize);
for (auto &loopOp : threadRootForOps) {
@@ -347,17 +349,17 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp,
// given workgroup size and number of workgroups.
template <typename OpTy>
LogicalResult createLaunchFromOp(OpTy rootForOp,
- ArrayRef<Value *> numWorkGroups,
- ArrayRef<Value *> workGroupSizes) {
+ ArrayRef<ValuePtr> numWorkGroups,
+ ArrayRef<ValuePtr> workGroupSizes) {
OpBuilder builder(rootForOp.getOperation());
if (numWorkGroups.size() > 3) {
return rootForOp.emitError("invalid ")
<< numWorkGroups.size() << "-D workgroup specification";
}
auto loc = rootForOp.getLoc();
- Value *one = builder.create<ConstantOp>(
+ ValuePtr one = builder.create<ConstantOp>(
loc, builder.getIntegerAttr(builder.getIndexType(), 1));
- SmallVector<Value *, 3> numWorkGroups3D(3, one), workGroupSize3D(3, one);
+ SmallVector<ValuePtr, 3> numWorkGroups3D(3, one), workGroupSize3D(3, one);
for (auto numWorkGroup : enumerate(numWorkGroups)) {
numWorkGroups3D[numWorkGroup.index()] = numWorkGroup.value();
}
@@ -367,7 +369,7 @@ LogicalResult createLaunchFromOp(OpTy rootForOp,
// Get the values used within the region of the rootForOp but defined above
// it.
- llvm::SetVector<Value *> valuesToForwardSet;
+ llvm::SetVector<ValuePtr> valuesToForwardSet;
getUsedValuesDefinedAbove(rootForOp.region(), rootForOp.region(),
valuesToForwardSet);
// Also add the values used for the lb, ub, and step of the rootForOp.
@@ -387,8 +389,8 @@ LogicalResult createLaunchFromOp(OpTy rootForOp,
// defined outside. They all are replaced with kernel arguments.
for (const auto &pair :
llvm::zip_first(valuesToForward, launchOp.getKernelArguments())) {
- Value *from = std::get<0>(pair);
- Value *to = std::get<1>(pair);
+ ValuePtr from = std::get<0>(pair);
+ ValuePtr to = std::get<1>(pair);
replaceAllUsesInRegionWith(from, to, launchOp.body());
}
return success();
@@ -408,22 +410,23 @@ void LoopToGpuConverter::createLaunch(OpTy rootForOp, OpTy innermostForOp,
OpBuilder builder(rootForOp.getOperation());
// Prepare the grid and block sizes for the launch operation. If there is
// no loop mapped to a specific dimension, use constant "1" as its size.
- Value *constOne = (numBlockDims < 3 || numThreadDims < 3)
- ? builder.create<ConstantIndexOp>(rootForOp.getLoc(), 1)
- : nullptr;
- Value *gridSizeX = dims[0];
- Value *gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
- Value *gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
- Value *blockSizeX = dims[numBlockDims];
- Value *blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
- Value *blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;
+ ValuePtr constOne =
+ (numBlockDims < 3 || numThreadDims < 3)
+ ? builder.create<ConstantIndexOp>(rootForOp.getLoc(), 1)
+ : nullptr;
+ ValuePtr gridSizeX = dims[0];
+ ValuePtr gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
+ ValuePtr gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
+ ValuePtr blockSizeX = dims[numBlockDims];
+ ValuePtr blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
+ ValuePtr blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;
// Create a launch op and move the body region of the innermost loop to the
// launch op. Pass the values defined outside the outermost loop and used
// inside the innermost loop and loop lower bounds as kernel data arguments.
// Still assuming perfect nesting so there are no values other than induction
// variables that are defined in one loop and used in deeper loops.
- llvm::SetVector<Value *> valuesToForwardSet;
+ llvm::SetVector<ValuePtr> valuesToForwardSet;
getUsedValuesDefinedAbove(innermostForOp.region(), rootForOp.region(),
valuesToForwardSet);
auto valuesToForward = valuesToForwardSet.takeVector();
@@ -457,15 +460,15 @@ void LoopToGpuConverter::createLaunch(OpTy rootForOp, OpTy innermostForOp,
originallyForwardedValues);
auto stepArgumentIt = std::next(lbArgumentIt, lbs.size());
for (auto en : llvm::enumerate(ivs)) {
- Value *id =
+ ValuePtr id =
en.index() < numBlockDims
? getDim3Value(launchOp.getBlockIds(), en.index())
: getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
- Value *step = steps[en.index()];
+ ValuePtr step = steps[en.index()];
if (!isConstantOne(step))
id = builder.create<MulIOp>(rootForOp.getLoc(), step, id);
- Value *ivReplacement =
+ ValuePtr ivReplacement =
builder.create<AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
en.value()->replaceAllUsesWith(ivReplacement);
replaceAllUsesInRegionWith(steps[en.index()], *stepArgumentIt,
@@ -479,8 +482,8 @@ void LoopToGpuConverter::createLaunch(OpTy rootForOp, OpTy innermostForOp,
// trailing positions, make sure we don't touch those.
for (const auto &pair :
llvm::zip_first(valuesToForward, launchOp.getKernelArguments())) {
- Value *from = std::get<0>(pair);
- Value *to = std::get<1>(pair);
+ ValuePtr from = std::get<0>(pair);
+ ValuePtr to = std::get<1>(pair);
replaceAllUsesInRegionWith(from, to, launchOp.body());
}
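
The induction-variable rewrite a few lines up replaces each original iv with the mapped hardware id scaled by the step (skipped when the step is the constant one) plus the loop's lower bound:

// ivReplacement = lowerBound + id * step   (id = block or thread index)
// e.g. lowerBound = 8, step = 4, id = 5 -> 8 + 5 * 4 = 28
int mappedIv(int lowerBound, int step, int id) { return lowerBound + id * step; }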
@@ -510,8 +513,8 @@ static LogicalResult convertLoopNestToGPULaunch(OpTy forOp,
// nested. The workgroup size and num workgroups are provided as input
template <typename OpTy>
static LogicalResult convertLoopToGPULaunch(OpTy forOp,
- ArrayRef<Value *> numWorkGroups,
- ArrayRef<Value *> workGroupSize) {
+ ArrayRef<ValuePtr> numWorkGroups,
+ ArrayRef<ValuePtr> workGroupSize) {
if (failed(checkLoopOpMappable(forOp, numWorkGroups.size(),
workGroupSize.size()))) {
return failure();
@@ -532,7 +535,7 @@ LogicalResult mlir::convertLoopNestToGPULaunch(ForOp forOp,
}
LogicalResult mlir::convertLoopToGPULaunch(loop::ForOp forOp,
- ArrayRef<Value *> numWorkGroups,
- ArrayRef<Value *> workGroupSizes) {
+ ArrayRef<ValuePtr> numWorkGroups,
+ ArrayRef<ValuePtr> workGroupSizes) {
return ::convertLoopToGPULaunch(forOp, numWorkGroups, workGroupSizes);
}
diff --git a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
index 21abc3cf99b..63836883512 100644
--- a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
+++ b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
@@ -98,7 +98,7 @@ struct ImperfectlyNestedForLoopMapper
// pass is only used for testing.
FuncOp funcOp = getFunction();
OpBuilder builder(funcOp.getOperation()->getRegion(0));
- SmallVector<Value *, 3> numWorkGroupsVal, workGroupSizeVal;
+ SmallVector<ValuePtr, 3> numWorkGroupsVal, workGroupSizeVal;
for (auto val : numWorkGroups) {
auto constOp = builder.create<ConstantOp>(
funcOp.getLoc(), builder.getIntegerAttr(builder.getIndexType(), val));
diff --git a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
index fdc90851b64..67b545c4ec8 100644
--- a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
@@ -256,20 +256,20 @@ LLVMOpLowering::LLVMOpLowering(StringRef rootOpName, MLIRContext *context,
/*============================================================================*/
/* StructBuilder implementation */
/*============================================================================*/
-StructBuilder::StructBuilder(Value *v) : value(v) {
+StructBuilder::StructBuilder(ValuePtr v) : value(v) {
assert(value != nullptr && "value cannot be null");
structType = value->getType().cast<LLVM::LLVMType>();
}
-Value *StructBuilder::extractPtr(OpBuilder &builder, Location loc,
- unsigned pos) {
+ValuePtr StructBuilder::extractPtr(OpBuilder &builder, Location loc,
+ unsigned pos) {
Type type = structType.cast<LLVM::LLVMType>().getStructElementType(pos);
return builder.create<LLVM::ExtractValueOp>(loc, type, value,
builder.getI64ArrayAttr(pos));
}
void StructBuilder::setPtr(OpBuilder &builder, Location loc, unsigned pos,
- Value *ptr) {
+ ValuePtr ptr) {
value = builder.create<LLVM::InsertValueOp>(loc, structType, value, ptr,
builder.getI64ArrayAttr(pos));
}
@@ -278,7 +278,7 @@ void StructBuilder::setPtr(OpBuilder &builder, Location loc, unsigned pos,
/*============================================================================*/
/// Construct a helper for the given descriptor value.
-MemRefDescriptor::MemRefDescriptor(Value *descriptor)
+MemRefDescriptor::MemRefDescriptor(ValuePtr descriptor)
: StructBuilder(descriptor) {
assert(value != nullptr && "value cannot be null");
indexType = value->getType().cast<LLVM::LLVMType>().getStructElementType(
@@ -289,7 +289,7 @@ MemRefDescriptor::MemRefDescriptor(Value *descriptor)
MemRefDescriptor MemRefDescriptor::undef(OpBuilder &builder, Location loc,
Type descriptorType) {
- Value *descriptor =
+ ValuePtr descriptor =
builder.create<LLVM::UndefOp>(loc, descriptorType.cast<LLVM::LLVMType>());
return MemRefDescriptor(descriptor);
}
@@ -300,7 +300,7 @@ MemRefDescriptor MemRefDescriptor::undef(OpBuilder &builder, Location loc,
MemRefDescriptor
MemRefDescriptor::fromStaticShape(OpBuilder &builder, Location loc,
LLVMTypeConverter &typeConverter,
- MemRefType type, Value *memory) {
+ MemRefType type, ValuePtr memory) {
assert(type.hasStaticShape() && "unexpected dynamic shape");
assert(type.getAffineMaps().empty() && "unexpected layout map");
@@ -325,37 +325,37 @@ MemRefDescriptor::fromStaticShape(OpBuilder &builder, Location loc,
}
/// Builds IR extracting the allocated pointer from the descriptor.
-Value *MemRefDescriptor::allocatedPtr(OpBuilder &builder, Location loc) {
+ValuePtr MemRefDescriptor::allocatedPtr(OpBuilder &builder, Location loc) {
return extractPtr(builder, loc, kAllocatedPtrPosInMemRefDescriptor);
}
/// Builds IR inserting the allocated pointer into the descriptor.
void MemRefDescriptor::setAllocatedPtr(OpBuilder &builder, Location loc,
- Value *ptr) {
+ ValuePtr ptr) {
setPtr(builder, loc, kAllocatedPtrPosInMemRefDescriptor, ptr);
}
/// Builds IR extracting the aligned pointer from the descriptor.
-Value *MemRefDescriptor::alignedPtr(OpBuilder &builder, Location loc) {
+ValuePtr MemRefDescriptor::alignedPtr(OpBuilder &builder, Location loc) {
return extractPtr(builder, loc, kAlignedPtrPosInMemRefDescriptor);
}
/// Builds IR inserting the aligned pointer into the descriptor.
void MemRefDescriptor::setAlignedPtr(OpBuilder &builder, Location loc,
- Value *ptr) {
+ ValuePtr ptr) {
setPtr(builder, loc, kAlignedPtrPosInMemRefDescriptor, ptr);
}
// Creates a constant Op producing a value of `resultType` from an index-typed
// integer attribute.
-static Value *createIndexAttrConstant(OpBuilder &builder, Location loc,
- Type resultType, int64_t value) {
+static ValuePtr createIndexAttrConstant(OpBuilder &builder, Location loc,
+ Type resultType, int64_t value) {
return builder.create<LLVM::ConstantOp>(
loc, resultType, builder.getIntegerAttr(builder.getIndexType(), value));
}
/// Builds IR extracting the offset from the descriptor.
-Value *MemRefDescriptor::offset(OpBuilder &builder, Location loc) {
+ValuePtr MemRefDescriptor::offset(OpBuilder &builder, Location loc) {
return builder.create<LLVM::ExtractValueOp>(
loc, indexType, value,
builder.getI64ArrayAttr(kOffsetPosInMemRefDescriptor));
@@ -363,7 +363,7 @@ Value *MemRefDescriptor::offset(OpBuilder &builder, Location loc) {
/// Builds IR inserting the offset into the descriptor.
void MemRefDescriptor::setOffset(OpBuilder &builder, Location loc,
- Value *offset) {
+ ValuePtr offset) {
value = builder.create<LLVM::InsertValueOp>(
loc, structType, value, offset,
builder.getI64ArrayAttr(kOffsetPosInMemRefDescriptor));
@@ -377,7 +377,8 @@ void MemRefDescriptor::setConstantOffset(OpBuilder &builder, Location loc,
}
/// Builds IR extracting the pos-th size from the descriptor.
-Value *MemRefDescriptor::size(OpBuilder &builder, Location loc, unsigned pos) {
+ValuePtr MemRefDescriptor::size(OpBuilder &builder, Location loc,
+ unsigned pos) {
return builder.create<LLVM::ExtractValueOp>(
loc, indexType, value,
builder.getI64ArrayAttr({kSizePosInMemRefDescriptor, pos}));
@@ -385,7 +386,7 @@ Value *MemRefDescriptor::size(OpBuilder &builder, Location loc, unsigned pos) {
/// Builds IR inserting the pos-th size into the descriptor
void MemRefDescriptor::setSize(OpBuilder &builder, Location loc, unsigned pos,
- Value *size) {
+ ValuePtr size) {
value = builder.create<LLVM::InsertValueOp>(
loc, structType, value, size,
builder.getI64ArrayAttr({kSizePosInMemRefDescriptor, pos}));
@@ -399,8 +400,8 @@ void MemRefDescriptor::setConstantSize(OpBuilder &builder, Location loc,
}
/// Builds IR extracting the pos-th stride from the descriptor.
-Value *MemRefDescriptor::stride(OpBuilder &builder, Location loc,
- unsigned pos) {
+ValuePtr MemRefDescriptor::stride(OpBuilder &builder, Location loc,
+ unsigned pos) {
return builder.create<LLVM::ExtractValueOp>(
loc, indexType, value,
builder.getI64ArrayAttr({kStridePosInMemRefDescriptor, pos}));
@@ -408,7 +409,7 @@ Value *MemRefDescriptor::stride(OpBuilder &builder, Location loc,
/// Builds IR inserting the pos-th stride into the descriptor
void MemRefDescriptor::setStride(OpBuilder &builder, Location loc, unsigned pos,
- Value *stride) {
+ ValuePtr stride) {
value = builder.create<LLVM::InsertValueOp>(
loc, structType, value, stride,
builder.getI64ArrayAttr({kStridePosInMemRefDescriptor, pos}));
@@ -431,30 +432,30 @@ LLVM::LLVMType MemRefDescriptor::getElementType() {
/*============================================================================*/
/// Construct a helper for the given descriptor value.
-UnrankedMemRefDescriptor::UnrankedMemRefDescriptor(Value *descriptor)
+UnrankedMemRefDescriptor::UnrankedMemRefDescriptor(ValuePtr descriptor)
: StructBuilder(descriptor) {}
/// Builds IR creating an `undef` value of the descriptor type.
UnrankedMemRefDescriptor UnrankedMemRefDescriptor::undef(OpBuilder &builder,
Location loc,
Type descriptorType) {
- Value *descriptor =
+ ValuePtr descriptor =
builder.create<LLVM::UndefOp>(loc, descriptorType.cast<LLVM::LLVMType>());
return UnrankedMemRefDescriptor(descriptor);
}
-Value *UnrankedMemRefDescriptor::rank(OpBuilder &builder, Location loc) {
+ValuePtr UnrankedMemRefDescriptor::rank(OpBuilder &builder, Location loc) {
return extractPtr(builder, loc, kRankInUnrankedMemRefDescriptor);
}
void UnrankedMemRefDescriptor::setRank(OpBuilder &builder, Location loc,
- Value *v) {
+ ValuePtr v) {
setPtr(builder, loc, kRankInUnrankedMemRefDescriptor, v);
}
-Value *UnrankedMemRefDescriptor::memRefDescPtr(OpBuilder &builder,
- Location loc) {
+ValuePtr UnrankedMemRefDescriptor::memRefDescPtr(OpBuilder &builder,
+ Location loc) {
return extractPtr(builder, loc, kPtrInUnrankedMemRefDescriptor);
}
void UnrankedMemRefDescriptor::setMemRefDescPtr(OpBuilder &builder,
- Location loc, Value *v) {
+ Location loc, ValuePtr v) {
setPtr(builder, loc, kPtrInUnrankedMemRefDescriptor, v);
}
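
The getters and setters above address fixed fields of the memref descriptors; as plain structs, the ranked and unranked layouts look roughly as follows, with field order matching the kXxxPos... constants used in the code (illustrative, not a definition from this patch):

#include <cstdint>

template <typename T, int N /* memref rank */>
struct RankedMemRefDescriptor {
  T *allocatedPtr;        // kAllocatedPtrPosInMemRefDescriptor
  T *alignedPtr;          // kAlignedPtrPosInMemRefDescriptor
  int64_t offset;         // kOffsetPosInMemRefDescriptor
  int64_t sizes[N];       // kSizePosInMemRefDescriptor
  int64_t strides[N];     // kStridePosInMemRefDescriptor
};

struct UnrankedMemRefDescriptorView {
  int64_t rank;           // kRankInUnrankedMemRefDescriptor
  void *descriptorPtr;    // kPtrInUnrankedMemRefDescriptor, points at a ranked descriptor
};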
namespace {
@@ -495,8 +496,8 @@ public:
}
// Create an LLVM IR pseudo-operation defining the given index constant.
- Value *createIndexConstant(ConversionPatternRewriter &builder, Location loc,
- uint64_t value) const {
+ ValuePtr createIndexConstant(ConversionPatternRewriter &builder, Location loc,
+ uint64_t value) const {
return createIndexAttrConstant(builder, loc, getIndexType(), value);
}
@@ -508,7 +509,7 @@ struct FuncOpConversion : public LLVMLegalizationPattern<FuncOp> {
using LLVMLegalizationPattern<FuncOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto funcOp = cast<FuncOp>(op);
FunctionType type = funcOp.getType();
@@ -556,8 +557,8 @@ struct FuncOpConversion : public LLVMLegalizationPattern<FuncOp> {
Block *firstBlock = &newFuncOp.getBody().front();
rewriter.setInsertionPoint(firstBlock, firstBlock->begin());
for (unsigned idx : promotedArgIndices) {
- BlockArgument *arg = firstBlock->getArgument(idx);
- Value *loaded = rewriter.create<LLVM::LoadOp>(funcOp.getLoc(), arg);
+ BlockArgumentPtr arg = firstBlock->getArgument(idx);
+ ValuePtr loaded = rewriter.create<LLVM::LoadOp>(funcOp.getLoc(), arg);
rewriter.replaceUsesOfBlockArgument(arg, loaded);
}
}
@@ -656,7 +657,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
// Convert the type of the result to an LLVM type, pass operands as is,
// preserve attributes.
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
unsigned numResults = op->getNumResults();
@@ -680,7 +681,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
// Otherwise, it had been converted to an operation producing a structure.
// Extract individual results from the structure and return them as a list.
- SmallVector<Value *, 4> results;
+ SmallVector<ValuePtr, 4> results;
results.reserve(numResults);
for (unsigned i = 0; i < numResults; ++i) {
auto type = this->lowering.convertType(op->getResult(i)->getType());
@@ -721,7 +722,7 @@ struct NaryOpLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
// Convert the type of the result to an LLVM type, pass operands as is,
// preserve attributes.
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
ValidateOpCount<SourceOp, OpCount>();
static_assert(
@@ -732,7 +733,7 @@ struct NaryOpLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
"expected same operands and result type");
// Cannot convert ops if their operands are not of LLVM type.
- for (Value *operand : operands) {
+ for (ValuePtr operand : operands) {
if (!operand || !operand->getType().isa<LLVM::LLVMType>())
return this->matchFailure();
}
@@ -755,16 +756,16 @@ struct NaryOpLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
if (!llvmVectorTy || llvmArrayTy != vectorTypeInfo.llvmArrayTy)
return this->matchFailure();
- Value *desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayTy);
+ ValuePtr desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayTy);
nDVectorIterate(vectorTypeInfo, rewriter, [&](ArrayAttr position) {
// For this unrolled `position` corresponding to the `linearIndex`^th
// element, extract operand vectors
- SmallVector<Value *, OpCount> extractedOperands;
+ SmallVector<ValuePtr, OpCount> extractedOperands;
for (unsigned i = 0; i < OpCount; ++i) {
extractedOperands.push_back(rewriter.create<LLVM::ExtractValueOp>(
loc, llvmVectorTy, operands[i], position));
}
- Value *newVal = rewriter.create<TargetOp>(
+ ValuePtr newVal = rewriter.create<TargetOp>(
loc, llvmVectorTy, extractedOperands, op->getAttrs());
desc = rewriter.create<LLVM::InsertValueOp>(loc, llvmArrayTy, desc,
newVal, position);
@@ -927,7 +928,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
return matchSuccess();
}
- void rewrite(Operation *op, ArrayRef<Value *> operands,
+ void rewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto allocOp = cast<AllocOp>(op);
@@ -936,7 +937,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
// Get actual sizes of the memref as values: static sizes are constant
// values and dynamic sizes are passed to 'alloc' as operands. In case of
// zero-dimensional memref, assume a scalar (size 1).
- SmallVector<Value *, 4> sizes;
+ SmallVector<ValuePtr, 4> sizes;
sizes.reserve(type.getRank());
unsigned i = 0;
for (int64_t s : type.getShape())
@@ -946,10 +947,10 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
sizes.push_back(createIndexConstant(rewriter, loc, 1));
// Compute the total number of memref elements.
- Value *cumulativeSize = sizes.front();
+ ValuePtr cumulativeSize = sizes.front();
for (unsigned i = 1, e = sizes.size(); i < e; ++i)
cumulativeSize = rewriter.create<LLVM::MulOp>(
- loc, getIndexType(), ArrayRef<Value *>{cumulativeSize, sizes[i]});
+ loc, getIndexType(), ArrayRef<ValuePtr>{cumulativeSize, sizes[i]});
// Compute the size of an individual element. This emits the MLIR equivalent
// of the following sizeof(...) implementation in LLVM IR:
@@ -962,17 +963,17 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
auto nullPtr = rewriter.create<LLVM::NullOp>(loc, convertedPtrType);
auto one = createIndexConstant(rewriter, loc, 1);
auto gep = rewriter.create<LLVM::GEPOp>(loc, convertedPtrType,
- ArrayRef<Value *>{nullPtr, one});
+ ArrayRef<ValuePtr>{nullPtr, one});
auto elementSize =
rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gep);
cumulativeSize = rewriter.create<LLVM::MulOp>(
- loc, getIndexType(), ArrayRef<Value *>{cumulativeSize, elementSize});
+ loc, getIndexType(), ArrayRef<ValuePtr>{cumulativeSize, elementSize});
// Allocate the underlying buffer and store a pointer to it in the MemRef
// descriptor.
- Value *allocated = nullptr;
+ ValuePtr allocated = nullptr;
int alignment = 0;
- Value *alignmentValue = nullptr;
+ ValuePtr alignmentValue = nullptr;
if (auto alignAttr = allocOp.alignment())
alignment = alignAttr.getValue().getSExtValue();
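For reference, the byte-size computation the hunks above assemble out of LLVM::MulOp reduces to multiplying all memref dimension sizes and then the element size obtained from the null-GEP/ptrtoint idiom. A minimal standalone sketch in plain C++ (the function name and types are illustrative, not part of this patch):

#include <cstddef>
#include <cstdint>
#include <vector>

// Mirrors the chain of LLVM::MulOp built by AllocOpLowering: product of all
// dimension sizes, then multiplied by the element size in bytes.
std::uint64_t totalAllocSizeInBytes(const std::vector<std::uint64_t> &sizes,
                                    std::uint64_t elementSizeInBytes) {
  std::uint64_t cumulative = sizes.empty() ? 1 : sizes.front();
  for (std::size_t i = 1; i < sizes.size(); ++i)
    cumulative *= sizes[i];
  return cumulative * elementSizeInBytes;
}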
@@ -1008,8 +1009,8 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
auto structElementType = lowering.convertType(elementType);
auto elementPtrType = structElementType.cast<LLVM::LLVMType>().getPointerTo(
type.getMemorySpace());
- Value *bitcastAllocated = rewriter.create<LLVM::BitcastOp>(
- loc, elementPtrType, ArrayRef<Value *>(allocated));
+ ValuePtr bitcastAllocated = rewriter.create<LLVM::BitcastOp>(
+ loc, elementPtrType, ArrayRef<ValuePtr>(allocated));
int64_t offset;
SmallVector<int64_t, 4> strides;
@@ -1031,22 +1032,22 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
memRefDescriptor.setAllocatedPtr(rewriter, loc, bitcastAllocated);
// Field 2: Actual aligned pointer to payload.
- Value *bitcastAligned = bitcastAllocated;
+ ValuePtr bitcastAligned = bitcastAllocated;
if (!useAlloca && alignment != 0) {
assert(alignmentValue);
// offset = (align - (ptr % align)) % align
- Value *intVal = rewriter.create<LLVM::PtrToIntOp>(
+ ValuePtr intVal = rewriter.create<LLVM::PtrToIntOp>(
loc, this->getIndexType(), allocated);
- Value *ptrModAlign =
+ ValuePtr ptrModAlign =
rewriter.create<LLVM::URemOp>(loc, intVal, alignmentValue);
- Value *subbed =
+ ValuePtr subbed =
rewriter.create<LLVM::SubOp>(loc, alignmentValue, ptrModAlign);
- Value *offset =
+ ValuePtr offset =
rewriter.create<LLVM::URemOp>(loc, subbed, alignmentValue);
- Value *aligned = rewriter.create<LLVM::GEPOp>(loc, allocated->getType(),
- allocated, offset);
+ ValuePtr aligned = rewriter.create<LLVM::GEPOp>(loc, allocated->getType(),
+ allocated, offset);
bitcastAligned = rewriter.create<LLVM::BitcastOp>(
- loc, elementPtrType, ArrayRef<Value *>(aligned));
+ loc, elementPtrType, ArrayRef<ValuePtr>(aligned));
}
memRefDescriptor.setAlignedPtr(rewriter, loc, bitcastAligned);
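The PtrToIntOp/URemOp/SubOp/GEPOp sequence above realizes the `offset = (align - (ptr % align)) % align` formula from the comment. A minimal sketch in plain C++ (illustrative names, not the lowering itself):

#include <cstdint>

std::uintptr_t alignUp(std::uintptr_t ptr, std::uintptr_t align) {
  // Bytes to add to reach the next multiple of `align` (zero if already aligned).
  std::uintptr_t offset = (align - (ptr % align)) % align;
  return ptr + offset; // corresponds to the GEP on the allocated pointer
}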
@@ -1061,10 +1062,10 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
// Fields 4 and 5: Sizes and strides of the strided MemRef.
// Store all sizes in the descriptor. Only dynamic sizes are passed in as
// operands to AllocOp.
- Value *runningStride = nullptr;
+ ValuePtr runningStride = nullptr;
// Iterate strides in reverse order, compute runningStride and strideValues.
auto nStrides = strides.size();
- SmallVector<Value *, 4> strideValues(nStrides, nullptr);
+ SmallVector<ValuePtr, 4> strideValues(nStrides, nullptr);
for (auto indexedStride : llvm::enumerate(llvm::reverse(strides))) {
int64_t index = nStrides - 1 - indexedStride.index();
if (strides[index] == MemRefType::getDynamicStrideOrOffset())
@@ -1101,7 +1102,7 @@ struct CallOpInterfaceLowering : public LLVMLegalizationPattern<CallOpType> {
using Base = LLVMLegalizationPattern<CallOpType>;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
OperandAdaptor<CallOpType> transformed(operands);
auto callOp = cast<CallOpType>(op);
@@ -1139,7 +1140,7 @@ struct CallOpInterfaceLowering : public LLVMLegalizationPattern<CallOpType> {
// TODO(aminim, ntv, riverriddle, zinenko): this seems like patching around
// a particular interaction between MemRefType and CallOp lowering. Find a
// way to avoid special casing.
- SmallVector<Value *, 4> results;
+ SmallVector<ValuePtr, 4> results;
results.reserve(numResults);
for (unsigned i = 0; i < numResults; ++i) {
auto type = this->lowering.convertType(op->getResult(i)->getType());
@@ -1173,7 +1174,7 @@ struct DeallocOpLowering : public LLVMLegalizationPattern<DeallocOp> {
useAlloca(useAlloca) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
if (useAlloca)
return rewriter.eraseOp(op), matchSuccess();
@@ -1193,7 +1194,7 @@ struct DeallocOpLowering : public LLVMLegalizationPattern<DeallocOp> {
}
MemRefDescriptor memref(transformed.memref());
- Value *casted = rewriter.create<LLVM::BitcastOp>(
+ ValuePtr casted = rewriter.create<LLVM::BitcastOp>(
op->getLoc(), getVoidPtrType(),
memref.allocatedPtr(rewriter, op->getLoc()));
rewriter.replaceOpWithNewOp<LLVM::CallOp>(
@@ -1209,7 +1210,7 @@ struct TanhOpLowering : public LLVMLegalizationPattern<TanhOp> {
using LLVMLegalizationPattern<TanhOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
using LLVMFuncOpT = LLVM::LLVMFuncOp;
@@ -1283,7 +1284,7 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
: matchFailure();
}
- void rewrite(Operation *op, ArrayRef<Value *> operands,
+ void rewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto memRefCastOp = cast<MemRefCastOp>(op);
OperandAdaptor<MemRefCastOp> transformed(operands);
@@ -1324,7 +1325,7 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
memRefDesc.setRank(rewriter, loc, rankVal);
// d2 = InsertValueOp d1, voidptr, 1
memRefDesc.setMemRefDescPtr(rewriter, loc, voidPtr);
- rewriter.replaceOp(op, (Value *)memRefDesc);
+ rewriter.replaceOp(op, (ValuePtr)memRefDesc);
} else if (srcType.isa<UnrankedMemRefType>() && dstType.isa<MemRefType>()) {
// Casting from unranked type to ranked.
@@ -1355,7 +1356,7 @@ struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
using LLVMLegalizationPattern<DimOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto dimOp = cast<DimOp>(op);
OperandAdaptor<DimOp> transformed(operands);
@@ -1397,43 +1398,45 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern<Derived> {
// by accumulating the running linearized value.
// Note that `indices` and `allocSizes` are passed in the same order as they
// appear in load/store operations and memref type declarations.
- Value *linearizeSubscripts(ConversionPatternRewriter &builder, Location loc,
- ArrayRef<Value *> indices,
- ArrayRef<Value *> allocSizes) const {
+ ValuePtr linearizeSubscripts(ConversionPatternRewriter &builder, Location loc,
+ ArrayRef<ValuePtr> indices,
+ ArrayRef<ValuePtr> allocSizes) const {
assert(indices.size() == allocSizes.size() &&
"mismatching number of indices and allocation sizes");
assert(!indices.empty() && "cannot linearize a 0-dimensional access");
- Value *linearized = indices.front();
+ ValuePtr linearized = indices.front();
for (int i = 1, nSizes = allocSizes.size(); i < nSizes; ++i) {
linearized = builder.create<LLVM::MulOp>(
loc, this->getIndexType(),
- ArrayRef<Value *>{linearized, allocSizes[i]});
+ ArrayRef<ValuePtr>{linearized, allocSizes[i]});
linearized = builder.create<LLVM::AddOp>(
- loc, this->getIndexType(), ArrayRef<Value *>{linearized, indices[i]});
+ loc, this->getIndexType(),
+ ArrayRef<ValuePtr>{linearized, indices[i]});
}
return linearized;
}
// This is a strided getElementPtr variant that linearizes subscripts as:
// `base_offset + index_0 * stride_0 + ... + index_n * stride_n`.
- Value *getStridedElementPtr(Location loc, Type elementTypePtr,
- Value *descriptor, ArrayRef<Value *> indices,
- ArrayRef<int64_t> strides, int64_t offset,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr getStridedElementPtr(Location loc, Type elementTypePtr,
+ ValuePtr descriptor, ArrayRef<ValuePtr> indices,
+ ArrayRef<int64_t> strides, int64_t offset,
+ ConversionPatternRewriter &rewriter) const {
MemRefDescriptor memRefDescriptor(descriptor);
- Value *base = memRefDescriptor.alignedPtr(rewriter, loc);
- Value *offsetValue = offset == MemRefType::getDynamicStrideOrOffset()
- ? memRefDescriptor.offset(rewriter, loc)
- : this->createIndexConstant(rewriter, loc, offset);
+ ValuePtr base = memRefDescriptor.alignedPtr(rewriter, loc);
+ ValuePtr offsetValue =
+ offset == MemRefType::getDynamicStrideOrOffset()
+ ? memRefDescriptor.offset(rewriter, loc)
+ : this->createIndexConstant(rewriter, loc, offset);
for (int i = 0, e = indices.size(); i < e; ++i) {
- Value *stride =
+ ValuePtr stride =
strides[i] == MemRefType::getDynamicStrideOrOffset()
? memRefDescriptor.stride(rewriter, loc, i)
: this->createIndexConstant(rewriter, loc, strides[i]);
- Value *additionalOffset =
+ ValuePtr additionalOffset =
rewriter.create<LLVM::MulOp>(loc, indices[i], stride);
offsetValue =
rewriter.create<LLVM::AddOp>(loc, offsetValue, additionalOffset);
@@ -1441,10 +1444,10 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern<Derived> {
return rewriter.create<LLVM::GEPOp>(loc, elementTypePtr, base, offsetValue);
}
- Value *getDataPtr(Location loc, MemRefType type, Value *memRefDesc,
- ArrayRef<Value *> indices,
- ConversionPatternRewriter &rewriter,
- llvm::Module &module) const {
+ ValuePtr getDataPtr(Location loc, MemRefType type, ValuePtr memRefDesc,
+ ArrayRef<ValuePtr> indices,
+ ConversionPatternRewriter &rewriter,
+ llvm::Module &module) const {
LLVM::LLVMType ptrType = MemRefDescriptor(memRefDesc).getElementType();
int64_t offset;
SmallVector<int64_t, 4> strides;
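The address math in getStridedElementPtr above evaluates `base_offset + index_0 * stride_0 + ... + index_n * stride_n` with one MulOp and one AddOp per dimension. A minimal standalone sketch in plain C++ (a hypothetical helper, not MLIR API):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

std::int64_t linearizedOffset(std::int64_t baseOffset,
                              const std::vector<std::int64_t> &indices,
                              const std::vector<std::int64_t> &strides) {
  assert(indices.size() == strides.size() && "rank mismatch");
  std::int64_t linear = baseOffset;
  for (std::size_t i = 0; i < indices.size(); ++i)
    linear += indices[i] * strides[i]; // one MulOp + AddOp per dimension
  return linear;                       // fed to LLVM::GEPOp off the aligned base
}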
@@ -1462,14 +1465,14 @@ struct LoadOpLowering : public LoadStoreOpLowering<LoadOp> {
using Base::Base;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loadOp = cast<LoadOp>(op);
OperandAdaptor<LoadOp> transformed(operands);
auto type = loadOp.getMemRefType();
- Value *dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(),
- transformed.indices(), rewriter, getModule());
+ ValuePtr dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(),
+ transformed.indices(), rewriter, getModule());
rewriter.replaceOpWithNewOp<LLVM::LoadOp>(op, dataPtr);
return matchSuccess();
}
@@ -1481,13 +1484,13 @@ struct StoreOpLowering : public LoadStoreOpLowering<StoreOp> {
using Base::Base;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto type = cast<StoreOp>(op).getMemRefType();
OperandAdaptor<StoreOp> transformed(operands);
- Value *dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(),
- transformed.indices(), rewriter, getModule());
+ ValuePtr dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(),
+ transformed.indices(), rewriter, getModule());
rewriter.replaceOpWithNewOp<LLVM::StoreOp>(op, transformed.value(),
dataPtr);
return matchSuccess();
@@ -1500,14 +1503,14 @@ struct PrefetchOpLowering : public LoadStoreOpLowering<PrefetchOp> {
using Base::Base;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto prefetchOp = cast<PrefetchOp>(op);
OperandAdaptor<PrefetchOp> transformed(operands);
auto type = prefetchOp.getMemRefType();
- Value *dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(),
- transformed.indices(), rewriter, getModule());
+ ValuePtr dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(),
+ transformed.indices(), rewriter, getModule());
// Replace with llvm.prefetch.
auto llvmI32Type = lowering.convertType(rewriter.getIntegerType(32));
@@ -1535,7 +1538,7 @@ struct IndexCastOpLowering : public LLVMLegalizationPattern<IndexCastOp> {
using LLVMLegalizationPattern<IndexCastOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
IndexCastOpOperandAdaptor transformed(operands);
auto indexCastOp = cast<IndexCastOp>(op);
@@ -1570,7 +1573,7 @@ struct CmpIOpLowering : public LLVMLegalizationPattern<CmpIOp> {
using LLVMLegalizationPattern<CmpIOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto cmpiOp = cast<CmpIOp>(op);
CmpIOpOperandAdaptor transformed(operands);
@@ -1589,7 +1592,7 @@ struct CmpFOpLowering : public LLVMLegalizationPattern<CmpFOp> {
using LLVMLegalizationPattern<CmpFOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto cmpfOp = cast<CmpFOp>(op);
CmpFOpOperandAdaptor transformed(operands);
@@ -1641,9 +1644,9 @@ struct OneToOneLLVMTerminatorLowering
using Super = OneToOneLLVMTerminatorLowering<SourceOp, TargetOp>;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const override {
SmallVector<ValueRange, 2> operandRanges(operands.begin(), operands.end());
rewriter.replaceOpWithNewOp<TargetOp>(op, properOperands, destinations,
@@ -1662,19 +1665,19 @@ struct ReturnOpLowering : public LLVMLegalizationPattern<ReturnOp> {
using LLVMLegalizationPattern<ReturnOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
unsigned numArguments = op->getNumOperands();
// If ReturnOp has 0 or 1 operand, create it and return immediately.
if (numArguments == 0) {
rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(
- op, ArrayRef<Value *>(), ArrayRef<Block *>(), op->getAttrs());
+ op, ArrayRef<ValuePtr>(), ArrayRef<Block *>(), op->getAttrs());
return matchSuccess();
}
if (numArguments == 1) {
rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(
- op, ArrayRef<Value *>(operands.front()), ArrayRef<Block *>(),
+ op, ArrayRef<ValuePtr>(operands.front()), ArrayRef<Block *>(),
op->getAttrs());
return matchSuccess();
}
@@ -1684,7 +1687,7 @@ struct ReturnOpLowering : public LLVMLegalizationPattern<ReturnOp> {
auto packedType =
lowering.packFunctionResults(llvm::to_vector<4>(op->getOperandTypes()));
- Value *packed = rewriter.create<LLVM::UndefOp>(op->getLoc(), packedType);
+ ValuePtr packed = rewriter.create<LLVM::UndefOp>(op->getLoc(), packedType);
for (unsigned i = 0; i < numArguments; ++i) {
packed = rewriter.create<LLVM::InsertValueOp>(
op->getLoc(), packedType, packed, operands[i],
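Conceptually, this multi-result path packs N return values into one aggregate: each InsertValueOp fills a field, and the final LLVM::ReturnOp returns the whole struct. A minimal sketch of the resulting calling convention in plain C++ (illustrative only):

#include <tuple>

// Two results packed into a single returned aggregate.
std::tuple<int, float> packedReturn(int a, float b) {
  return std::make_tuple(a, b);
}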
@@ -1712,7 +1715,7 @@ struct SplatOpLowering : public LLVMLegalizationPattern<SplatOp> {
using LLVMLegalizationPattern<SplatOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto splatOp = cast<SplatOp>(op);
VectorType resultType = splatOp.getType().dyn_cast<VectorType>();
@@ -1721,7 +1724,7 @@ struct SplatOpLowering : public LLVMLegalizationPattern<SplatOp> {
// First insert it into an undef vector so we can shuffle it.
auto vectorType = lowering.convertType(splatOp.getType());
- Value *undef = rewriter.create<LLVM::UndefOp>(op->getLoc(), vectorType);
+ ValuePtr undef = rewriter.create<LLVM::UndefOp>(op->getLoc(), vectorType);
auto zero = rewriter.create<LLVM::ConstantOp>(
op->getLoc(), lowering.convertType(rewriter.getIntegerType(32)),
rewriter.getZeroAttr(rewriter.getIntegerType(32)));
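The insert-then-shuffle pattern above implements a broadcast: the scalar is inserted at lane 0 of an undef vector and a shuffle with an all-zero mask copies it to every lane. A minimal sketch of the resulting semantics in plain C++ (illustrative, not the lowering itself):

#include <array>
#include <cstddef>

template <std::size_t Width>
std::array<float, Width> splat(float scalar) {
  std::array<float, Width> result{};
  for (auto &lane : result)
    lane = scalar; // the all-zero shuffle mask broadcasts lane 0 everywhere
  return result;
}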
@@ -1746,7 +1749,7 @@ struct SplatNdOpLowering : public LLVMLegalizationPattern<SplatOp> {
using LLVMLegalizationPattern<SplatOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto splatOp = cast<SplatOp>(op);
OperandAdaptor<SplatOp> adaptor(operands);
@@ -1763,16 +1766,16 @@ struct SplatNdOpLowering : public LLVMLegalizationPattern<SplatOp> {
return matchFailure();
// Construct returned value.
- Value *desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayTy);
+ ValuePtr desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayTy);
// Construct a 1-D vector with the splatted value that we insert in all the
// places within the returned descriptor.
- Value *vdesc = rewriter.create<LLVM::UndefOp>(loc, llvmVectorTy);
+ ValuePtr vdesc = rewriter.create<LLVM::UndefOp>(loc, llvmVectorTy);
auto zero = rewriter.create<LLVM::ConstantOp>(
loc, lowering.convertType(rewriter.getIntegerType(32)),
rewriter.getZeroAttr(rewriter.getIntegerType(32)));
- Value *v = rewriter.create<LLVM::InsertElementOp>(loc, llvmVectorTy, vdesc,
- adaptor.input(), zero);
+ ValuePtr v = rewriter.create<LLVM::InsertElementOp>(
+ loc, llvmVectorTy, vdesc, adaptor.input(), zero);
// Shuffle the value across the desired number of elements.
int64_t width = resultType.getDimSize(resultType.getRank() - 1);
@@ -1800,21 +1803,21 @@ struct SubViewOpLowering : public LLVMLegalizationPattern<SubViewOp> {
using LLVMLegalizationPattern<SubViewOp>::LLVMLegalizationPattern;
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto viewOp = cast<SubViewOp>(op);
// TODO(b/144779634, ravishankarm) : After Tblgen is adapted to support
// having multiple variadic operands where each operand can have different
// number of entries, clean all of this up.
- SmallVector<Value *, 2> dynamicOffsets(
+ SmallVector<ValuePtr, 2> dynamicOffsets(
std::next(operands.begin()),
std::next(operands.begin(), 1 + viewOp.getNumOffsets()));
- SmallVector<Value *, 2> dynamicSizes(
+ SmallVector<ValuePtr, 2> dynamicSizes(
std::next(operands.begin(), 1 + viewOp.getNumOffsets()),
std::next(operands.begin(),
1 + viewOp.getNumOffsets() + viewOp.getNumSizes()));
- SmallVector<Value *, 2> dynamicStrides(
+ SmallVector<ValuePtr, 2> dynamicStrides(
std::next(operands.begin(),
1 + viewOp.getNumOffsets() + viewOp.getNumSizes()),
operands.end());
@@ -1851,8 +1854,8 @@ struct SubViewOpLowering : public LLVMLegalizationPattern<SubViewOp> {
auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy);
// Copy the buffer pointer from the old descriptor to the new one.
- Value *extracted = sourceMemRef.allocatedPtr(rewriter, loc);
- Value *bitcastPtr = rewriter.create<LLVM::BitcastOp>(
+ ValuePtr extracted = sourceMemRef.allocatedPtr(rewriter, loc);
+ ValuePtr bitcastPtr = rewriter.create<LLVM::BitcastOp>(
loc, targetElementTy.getPointerTo(), extracted);
targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr);
@@ -1862,7 +1865,7 @@ struct SubViewOpLowering : public LLVMLegalizationPattern<SubViewOp> {
targetMemRef.setAlignedPtr(rewriter, loc, bitcastPtr);
// Extract strides needed to compute offset.
- SmallVector<Value *, 4> strideValues;
+ SmallVector<ValuePtr, 4> strideValues;
strideValues.reserve(viewMemRefType.getRank());
for (int i = 0, e = viewMemRefType.getRank(); i < e; ++i)
strideValues.push_back(sourceMemRef.stride(rewriter, loc, i));
@@ -1879,9 +1882,9 @@ struct SubViewOpLowering : public LLVMLegalizationPattern<SubViewOp> {
}
// Offset.
- Value *baseOffset = sourceMemRef.offset(rewriter, loc);
+ ValuePtr baseOffset = sourceMemRef.offset(rewriter, loc);
for (int i = 0, e = viewMemRefType.getRank(); i < e; ++i) {
- Value *min = dynamicOffsets[i];
+ ValuePtr min = dynamicOffsets[i];
baseOffset = rewriter.create<LLVM::AddOp>(
loc, baseOffset,
rewriter.create<LLVM::MulOp>(loc, min, strideValues[i]));
@@ -1891,7 +1894,7 @@ struct SubViewOpLowering : public LLVMLegalizationPattern<SubViewOp> {
// Update sizes and strides.
for (int i = viewMemRefType.getRank() - 1; i >= 0; --i) {
targetMemRef.setSize(rewriter, loc, i, dynamicSizes[i]);
- Value *newStride;
+ ValuePtr newStride;
if (dynamicStrides.empty())
newStride = rewriter.create<LLVM::ConstantOp>(
loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i]));
@@ -1916,9 +1919,9 @@ struct ViewOpLowering : public LLVMLegalizationPattern<ViewOp> {
// Build and return the value for the idx^th shape dimension, either by
// returning the constant shape dimension or counting the proper dynamic size.
- Value *getSize(ConversionPatternRewriter &rewriter, Location loc,
- ArrayRef<int64_t> shape, ArrayRef<Value *> dynamicSizes,
- unsigned idx) const {
+ ValuePtr getSize(ConversionPatternRewriter &rewriter, Location loc,
+ ArrayRef<int64_t> shape, ArrayRef<ValuePtr> dynamicSizes,
+ unsigned idx) const {
assert(idx < shape.size());
if (!ShapedType::isDynamic(shape[idx]))
return createIndexConstant(rewriter, loc, shape[idx]);
@@ -1933,9 +1936,9 @@ struct ViewOpLowering : public LLVMLegalizationPattern<ViewOp> {
// or by computing the dynamic stride from the current `runningStride` and
// `nextSize`. The caller should keep a running stride and update it with the
// result returned by this function.
- Value *getStride(ConversionPatternRewriter &rewriter, Location loc,
- ArrayRef<int64_t> strides, Value *nextSize,
- Value *runningStride, unsigned idx) const {
+ ValuePtr getStride(ConversionPatternRewriter &rewriter, Location loc,
+ ArrayRef<int64_t> strides, ValuePtr nextSize,
+ ValuePtr runningStride, unsigned idx) const {
assert(idx < strides.size());
if (strides[idx] != MemRefType::getDynamicStrideOrOffset())
return createIndexConstant(rewriter, loc, strides[idx]);
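For a contiguous view, the running-stride computation in getStride amounts to the usual row-major recurrence `strides[rank-1] = 1; strides[i] = strides[i+1] * shape[i+1]`. A minimal sketch in plain C++ (illustrative, assuming a fully static shape):

#include <cstdint>
#include <vector>

std::vector<std::int64_t>
contiguousStrides(const std::vector<std::int64_t> &shape) {
  std::vector<std::int64_t> strides(shape.size(), 1);
  for (int i = static_cast<int>(shape.size()) - 2; i >= 0; --i)
    strides[i] = strides[i + 1] * shape[i + 1]; // running stride * next size
  return strides;
}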
@@ -1948,7 +1951,7 @@ struct ViewOpLowering : public LLVMLegalizationPattern<ViewOp> {
}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto viewOp = cast<ViewOp>(op);
@@ -1975,8 +1978,8 @@ struct ViewOpLowering : public LLVMLegalizationPattern<ViewOp> {
auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy);
// Field 1: Copy the allocated pointer, used for malloc/free.
- Value *extracted = sourceMemRef.allocatedPtr(rewriter, loc);
- Value *bitcastPtr = rewriter.create<LLVM::BitcastOp>(
+ ValuePtr extracted = sourceMemRef.allocatedPtr(rewriter, loc);
+ ValuePtr bitcastPtr = rewriter.create<LLVM::BitcastOp>(
loc, targetElementTy.getPointerTo(), extracted);
targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr);
@@ -1993,10 +1996,10 @@ struct ViewOpLowering : public LLVMLegalizationPattern<ViewOp> {
auto sizeAndOffsetOperands = adaptor.operands();
assert(llvm::size(sizeAndOffsetOperands) ==
numDynamicSizes + (hasDynamicOffset ? 1 : 0));
- Value *baseOffset = !hasDynamicOffset
- ? createIndexConstant(rewriter, loc, offset)
- // TODO(ntv): better adaptor.
- : sizeAndOffsetOperands.front();
+ ValuePtr baseOffset = !hasDynamicOffset
+ ? createIndexConstant(rewriter, loc, offset)
+ // TODO(ntv): better adaptor.
+ : sizeAndOffsetOperands.front();
targetMemRef.setOffset(rewriter, loc, baseOffset);
// Early exit for 0-D corner case.
@@ -2007,14 +2010,14 @@ struct ViewOpLowering : public LLVMLegalizationPattern<ViewOp> {
if (strides.back() != 1)
return op->emitWarning("cannot cast to non-contiguous shape"),
matchFailure();
- Value *stride = nullptr, *nextSize = nullptr;
+ ValuePtr stride = nullptr, nextSize = nullptr;
// Drop the dynamic stride from the operand list, if present.
- ArrayRef<Value *> sizeOperands(sizeAndOffsetOperands);
+ ArrayRef<ValuePtr> sizeOperands(sizeAndOffsetOperands);
if (hasDynamicOffset)
sizeOperands = sizeOperands.drop_front();
for (int i = viewMemRefType.getRank() - 1; i >= 0; --i) {
// Update size.
- Value *size =
+ ValuePtr size =
getSize(rewriter, loc, viewMemRefType.getShape(), sizeOperands, i);
targetMemRef.setSize(rewriter, loc, i, size);
// Update stride.
@@ -2058,7 +2061,7 @@ static void ensureDistinctSuccessors(Block &bb) {
auto *dummyBlock = new Block();
bb.getParent()->push_back(dummyBlock);
auto builder = OpBuilder(dummyBlock);
- SmallVector<Value *, 8> operands(
+ SmallVector<ValuePtr, 8> operands(
terminator->getSuccessorOperands(*position));
builder.create<BranchOp>(terminator->getLoc(), successor.first, operands);
terminator->setSuccessor(dummyBlock, *position);
@@ -2179,33 +2182,33 @@ Type LLVMTypeConverter::packFunctionResults(ArrayRef<Type> types) {
return LLVM::LLVMType::getStructTy(llvmDialect, resultTypes);
}
-Value *LLVMTypeConverter::promoteOneMemRefDescriptor(Location loc,
- Value *operand,
- OpBuilder &builder) {
+ValuePtr LLVMTypeConverter::promoteOneMemRefDescriptor(Location loc,
+ ValuePtr operand,
+ OpBuilder &builder) {
auto *context = builder.getContext();
auto int64Ty = LLVM::LLVMType::getInt64Ty(getDialect());
auto indexType = IndexType::get(context);
// Alloca with proper alignment. We do not expect optimizations of this
// alloca op and so we omit allocating at the entry block.
auto ptrType = operand->getType().cast<LLVM::LLVMType>().getPointerTo();
- Value *one = builder.create<LLVM::ConstantOp>(loc, int64Ty,
- IntegerAttr::get(indexType, 1));
- Value *allocated =
+ ValuePtr one = builder.create<LLVM::ConstantOp>(
+ loc, int64Ty, IntegerAttr::get(indexType, 1));
+ ValuePtr allocated =
builder.create<LLVM::AllocaOp>(loc, ptrType, one, /*alignment=*/0);
// Store into the alloca'ed descriptor.
builder.create<LLVM::StoreOp>(loc, operand, allocated);
return allocated;
}
-SmallVector<Value *, 4>
+SmallVector<ValuePtr, 4>
LLVMTypeConverter::promoteMemRefDescriptors(Location loc, ValueRange opOperands,
ValueRange operands,
OpBuilder &builder) {
- SmallVector<Value *, 4> promotedOperands;
+ SmallVector<ValuePtr, 4> promotedOperands;
promotedOperands.reserve(operands.size());
for (auto it : llvm::zip(opOperands, operands)) {
- auto *operand = std::get<0>(it);
- auto *llvmOperand = std::get<1>(it);
+ auto operand = std::get<0>(it);
+ auto llvmOperand = std::get<1>(it);
if (!operand->getType().isa<MemRefType>() &&
!operand->getType().isa<UnrankedMemRefType>()) {
promotedOperands.push_back(operand);
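The promotion above swaps pass-by-value memref descriptors for pass-by-pointer: a single stack slot is alloca'ed, the descriptor is stored into it, and the pointer is what gets passed on. A minimal sketch in plain C++ (the struct layout and names are illustrative stand-ins, not the real descriptor):

struct Descriptor {
  void *allocatedPtr;
  void *alignedPtr;
  long long offset;
};

// `slot` plays the role of the LLVM::AllocaOp result; the assignment is the
// LLVM::StoreOp; the callee then receives a pointer instead of the struct.
const Descriptor *promoteToPointer(const Descriptor &desc, Descriptor &slot) {
  slot = desc;
  return &slot;
}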
diff --git a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
index a14271efbb6..f7b0c9cb9bc 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
@@ -44,7 +44,7 @@ public:
using SPIRVOpLowering<ConstantOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(ConstantOp constIndexOp, ArrayRef<Value *> operands,
+ matchAndRewrite(ConstantOp constIndexOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -54,7 +54,7 @@ public:
using SPIRVOpLowering<CmpIOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(CmpIOp cmpIOp, ArrayRef<Value *> operands,
+ matchAndRewrite(CmpIOp cmpIOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -70,7 +70,7 @@ public:
using SPIRVOpLowering<StdOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(StdOp operation, ArrayRef<Value *> operands,
+ matchAndRewrite(StdOp operation, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto resultType =
this->typeConverter.convertType(operation.getResult()->getType());
@@ -89,7 +89,7 @@ public:
using SPIRVOpLowering<LoadOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(LoadOp loadOp, ArrayRef<Value *> operands,
+ matchAndRewrite(LoadOp loadOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -100,7 +100,7 @@ public:
using SPIRVOpLowering<ReturnOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(ReturnOp returnOp, ArrayRef<Value *> operands,
+ matchAndRewrite(ReturnOp returnOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -110,7 +110,7 @@ class SelectOpConversion final : public SPIRVOpLowering<SelectOp> {
public:
using SPIRVOpLowering<SelectOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(SelectOp op, ArrayRef<Value *> operands,
+ matchAndRewrite(SelectOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -123,7 +123,7 @@ public:
using SPIRVOpLowering<StoreOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(StoreOp storeOp, ArrayRef<Value *> operands,
+ matchAndRewrite(StoreOp storeOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -141,7 +141,8 @@ public:
spirv::AccessChainOp getElementPtr(OpBuilder &builder,
SPIRVTypeConverter &typeConverter,
Location loc, MemRefType origBaseType,
- Value *basePtr, ArrayRef<Value *> indices) {
+ ValuePtr basePtr,
+ ArrayRef<ValuePtr> indices) {
// Get base and offset of the MemRefType and verify they are static.
int64_t offset;
SmallVector<int64_t, 4> strides;
@@ -152,18 +153,18 @@ spirv::AccessChainOp getElementPtr(OpBuilder &builder,
auto indexType = typeConverter.getIndexType(builder.getContext());
- Value *ptrLoc = nullptr;
+ ValuePtr ptrLoc = nullptr;
assert(indices.size() == strides.size());
for (auto index : enumerate(indices)) {
- Value *strideVal = builder.create<spirv::ConstantOp>(
+ ValuePtr strideVal = builder.create<spirv::ConstantOp>(
loc, indexType, IntegerAttr::get(indexType, strides[index.index()]));
- Value *update =
+ ValuePtr update =
builder.create<spirv::IMulOp>(loc, strideVal, index.value());
ptrLoc =
(ptrLoc ? builder.create<spirv::IAddOp>(loc, ptrLoc, update).getResult()
: update);
}
- SmallVector<Value *, 2> linearizedIndices;
+ SmallVector<ValuePtr, 2> linearizedIndices;
// Add a '0' at the start to index into the struct.
linearizedIndices.push_back(builder.create<spirv::ConstantOp>(
loc, indexType, IntegerAttr::get(indexType, 0)));
@@ -176,7 +177,7 @@ spirv::AccessChainOp getElementPtr(OpBuilder &builder,
//===----------------------------------------------------------------------===//
PatternMatchResult ConstantIndexOpConversion::matchAndRewrite(
- ConstantOp constIndexOp, ArrayRef<Value *> operands,
+ ConstantOp constIndexOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!constIndexOp.getResult()->getType().isa<IndexType>()) {
return matchFailure();
@@ -210,7 +211,7 @@ PatternMatchResult ConstantIndexOpConversion::matchAndRewrite(
//===----------------------------------------------------------------------===//
PatternMatchResult
-CmpIOpConversion::matchAndRewrite(CmpIOp cmpIOp, ArrayRef<Value *> operands,
+CmpIOpConversion::matchAndRewrite(CmpIOp cmpIOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
CmpIOpOperandAdaptor cmpIOpOperands(operands);
@@ -242,7 +243,7 @@ CmpIOpConversion::matchAndRewrite(CmpIOp cmpIOp, ArrayRef<Value *> operands,
//===----------------------------------------------------------------------===//
PatternMatchResult
-LoadOpConversion::matchAndRewrite(LoadOp loadOp, ArrayRef<Value *> operands,
+LoadOpConversion::matchAndRewrite(LoadOp loadOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
LoadOpOperandAdaptor loadOperands(operands);
auto loadPtr = getElementPtr(rewriter, typeConverter, loadOp.getLoc(),
@@ -260,7 +261,7 @@ LoadOpConversion::matchAndRewrite(LoadOp loadOp, ArrayRef<Value *> operands,
PatternMatchResult
ReturnOpConversion::matchAndRewrite(ReturnOp returnOp,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (returnOp.getNumOperands()) {
return matchFailure();
@@ -274,7 +275,7 @@ ReturnOpConversion::matchAndRewrite(ReturnOp returnOp,
//===----------------------------------------------------------------------===//
PatternMatchResult
-SelectOpConversion::matchAndRewrite(SelectOp op, ArrayRef<Value *> operands,
+SelectOpConversion::matchAndRewrite(SelectOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
SelectOpOperandAdaptor selectOperands(operands);
rewriter.replaceOpWithNewOp<spirv::SelectOp>(op, selectOperands.condition(),
@@ -288,7 +289,7 @@ SelectOpConversion::matchAndRewrite(SelectOp op, ArrayRef<Value *> operands,
//===----------------------------------------------------------------------===//
PatternMatchResult
-StoreOpConversion::matchAndRewrite(StoreOp storeOp, ArrayRef<Value *> operands,
+StoreOpConversion::matchAndRewrite(StoreOp storeOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
StoreOpOperandAdaptor storeOperands(operands);
auto storePtr =
diff --git a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp
index c0c56a3b0b2..113789abe8a 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp
@@ -37,7 +37,7 @@ public:
using SPIRVOpLowering<FuncOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(FuncOp funcOp, ArrayRef<Value *> operands,
+ matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -49,7 +49,7 @@ class ConvertStandardToSPIRVPass
} // namespace
PatternMatchResult
-FuncOpConversion::matchAndRewrite(FuncOp funcOp, ArrayRef<Value *> operands,
+FuncOpConversion::matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
auto fnType = funcOp.getType();
if (fnType.getNumResults()) {
diff --git a/mlir/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp b/mlir/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp
index 4469c2802a8..2e1a7f09ff8 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp
@@ -69,7 +69,7 @@ public:
static LogicalResult
resolveSourceIndices(Location loc, PatternRewriter &rewriter,
SubViewOp subViewOp, ValueRange indices,
- SmallVectorImpl<Value *> &sourceIndices) {
+ SmallVectorImpl<ValuePtr> &sourceIndices) {
// TODO: Aborting when the offsets are static. There might be a way to fold
// the subview op with load even if the offsets have been canonicalized
// away.
@@ -77,7 +77,7 @@ resolveSourceIndices(Location loc, PatternRewriter &rewriter,
return failure();
ValueRange opOffsets = subViewOp.offsets();
- SmallVector<Value *, 2> opStrides;
+ SmallVector<ValuePtr, 2> opStrides;
if (subViewOp.getNumStrides()) {
// If the strides are dynamic, get the stride operands.
opStrides = llvm::to_vector<2>(subViewOp.strides());
@@ -124,7 +124,7 @@ LoadOpOfSubViewFolder::matchAndRewrite(LoadOp loadOp,
if (!subViewOp) {
return matchFailure();
}
- SmallVector<Value *, 4> sourceIndices;
+ SmallVector<ValuePtr, 4> sourceIndices;
if (failed(resolveSourceIndices(loadOp.getLoc(), rewriter, subViewOp,
loadOp.indices(), sourceIndices)))
return matchFailure();
@@ -146,7 +146,7 @@ StoreOpOfSubViewFolder::matchAndRewrite(StoreOp storeOp,
if (!subViewOp) {
return matchFailure();
}
- SmallVector<Value *, 4> sourceIndices;
+ SmallVector<ValuePtr, 4> sourceIndices;
if (failed(resolveSourceIndices(storeOp.getLoc(), rewriter, subViewOp,
storeOp.indices(), sourceIndices)))
return matchFailure();
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 9ec8ec6f88d..5099cb01bbc 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -62,9 +62,10 @@ static VectorType reducedVectorTypeBack(VectorType tp) {
}
// Helper that picks the proper sequence for inserting.
-static Value *insertOne(ConversionPatternRewriter &rewriter,
- LLVMTypeConverter &lowering, Location loc, Value *val1,
- Value *val2, Type llvmType, int64_t rank, int64_t pos) {
+static ValuePtr insertOne(ConversionPatternRewriter &rewriter,
+ LLVMTypeConverter &lowering, Location loc,
+ ValuePtr val1, ValuePtr val2, Type llvmType,
+ int64_t rank, int64_t pos) {
if (rank == 1) {
auto idxType = rewriter.getIndexType();
auto constant = rewriter.create<LLVM::ConstantOp>(
@@ -78,9 +79,10 @@ static Value *insertOne(ConversionPatternRewriter &rewriter,
}
// Helper that picks the proper sequence for extracting.
-static Value *extractOne(ConversionPatternRewriter &rewriter,
- LLVMTypeConverter &lowering, Location loc, Value *val,
- Type llvmType, int64_t rank, int64_t pos) {
+static ValuePtr extractOne(ConversionPatternRewriter &rewriter,
+ LLVMTypeConverter &lowering, Location loc,
+ ValuePtr val, Type llvmType, int64_t rank,
+ int64_t pos) {
if (rank == 1) {
auto idxType = rewriter.getIndexType();
auto constant = rewriter.create<LLVM::ConstantOp>(
@@ -101,7 +103,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto broadcastOp = cast<vector::BroadcastOp>(op);
VectorType dstVectorType = broadcastOp.getVectorType();
@@ -129,9 +131,9 @@ private:
// ops once all insert/extract/shuffle operations
// are available with lowering implementation.
//
- Value *expandRanks(Value *value, Location loc, VectorType srcVectorType,
- VectorType dstVectorType,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr expandRanks(ValuePtr value, Location loc, VectorType srcVectorType,
+ VectorType dstVectorType,
+ ConversionPatternRewriter &rewriter) const {
assert((dstVectorType != nullptr) && "invalid result type in broadcast");
// Determine rank of source and destination.
int64_t srcRank = srcVectorType ? srcVectorType.getRank() : 0;
@@ -168,23 +170,24 @@ private:
// becomes:
// x = [s,s]
// v = [x,x,x,x]
- Value *duplicateOneRank(Value *value, Location loc, VectorType srcVectorType,
- VectorType dstVectorType, int64_t rank, int64_t dim,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr duplicateOneRank(ValuePtr value, Location loc,
+ VectorType srcVectorType, VectorType dstVectorType,
+ int64_t rank, int64_t dim,
+ ConversionPatternRewriter &rewriter) const {
Type llvmType = lowering.convertType(dstVectorType);
assert((llvmType != nullptr) && "unlowerable vector type");
if (rank == 1) {
- Value *undef = rewriter.create<LLVM::UndefOp>(loc, llvmType);
- Value *expand =
+ ValuePtr undef = rewriter.create<LLVM::UndefOp>(loc, llvmType);
+ ValuePtr expand =
insertOne(rewriter, lowering, loc, undef, value, llvmType, rank, 0);
SmallVector<int32_t, 4> zeroValues(dim, 0);
return rewriter.create<LLVM::ShuffleVectorOp>(
loc, expand, undef, rewriter.getI32ArrayAttr(zeroValues));
}
- Value *expand =
+ ValuePtr expand =
expandRanks(value, loc, srcVectorType,
reducedVectorTypeFront(dstVectorType), rewriter);
- Value *result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
+ ValuePtr result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
for (int64_t d = 0; d < dim; ++d) {
result =
insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d);
@@ -209,19 +212,20 @@ private:
// y = broadcast w[1][0] : vector<2xf32> to vector <2x2xf32>
// a = [x, y]
// etc.
- Value *stretchOneRank(Value *value, Location loc, VectorType srcVectorType,
- VectorType dstVectorType, int64_t rank, int64_t dim,
- ConversionPatternRewriter &rewriter) const {
+ ValuePtr stretchOneRank(ValuePtr value, Location loc,
+ VectorType srcVectorType, VectorType dstVectorType,
+ int64_t rank, int64_t dim,
+ ConversionPatternRewriter &rewriter) const {
Type llvmType = lowering.convertType(dstVectorType);
assert((llvmType != nullptr) && "unlowerable vector type");
- Value *result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
+ ValuePtr result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
bool atStretch = dim != srcVectorType.getDimSize(0);
if (rank == 1) {
assert(atStretch);
Type redLlvmType = lowering.convertType(dstVectorType.getElementType());
- Value *one =
+ ValuePtr one =
extractOne(rewriter, lowering, loc, value, redLlvmType, rank, 0);
- Value *expand =
+ ValuePtr expand =
insertOne(rewriter, lowering, loc, result, one, llvmType, rank, 0);
SmallVector<int32_t, 4> zeroValues(dim, 0);
return rewriter.create<LLVM::ShuffleVectorOp>(
@@ -232,9 +236,9 @@ private:
Type redLlvmType = lowering.convertType(redSrcType);
for (int64_t d = 0; d < dim; ++d) {
int64_t pos = atStretch ? 0 : d;
- Value *one =
+ ValuePtr one =
extractOne(rewriter, lowering, loc, value, redLlvmType, rank, pos);
- Value *expand = expandRanks(one, loc, redSrcType, redDstType, rewriter);
+ ValuePtr expand = expandRanks(one, loc, redSrcType, redDstType, rewriter);
result =
insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d);
}
@@ -250,7 +254,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto adaptor = vector::ShuffleOpOperandAdaptor(operands);
@@ -274,23 +278,23 @@ public:
// For rank 1, where both operands have *exactly* the same vector type,
// there is direct shuffle support in LLVM. Use it!
if (rank == 1 && v1Type == v2Type) {
- Value *shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
+ ValuePtr shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
rewriter.replaceOp(op, shuffle);
return matchSuccess();
}
// For all other cases, insert the individual values individually.
- Value *insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
+ ValuePtr insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
int64_t insPos = 0;
for (auto en : llvm::enumerate(maskArrayAttr)) {
int64_t extPos = en.value().cast<IntegerAttr>().getInt();
- Value *value = adaptor.v1();
+ ValuePtr value = adaptor.v1();
if (extPos >= v1Dim) {
extPos -= v1Dim;
value = adaptor.v2();
}
- Value *extract =
+ ValuePtr extract =
extractOne(rewriter, lowering, loc, value, llvmType, rank, extPos);
insert = insertOne(rewriter, lowering, loc, insert, extract, llvmType,
rank, insPos++);
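When the direct 1-D shufflevector cannot be used, the loop above rebuilds the shuffle element by element: each mask entry selects from v1 or, once past v1's size, from v2. A minimal sketch of the mask semantics in plain C++ (illustrative only):

#include <cstddef>
#include <vector>

std::vector<float> shuffleByMask(const std::vector<float> &v1,
                                 const std::vector<float> &v2,
                                 const std::vector<std::size_t> &mask) {
  std::vector<float> result;
  result.reserve(mask.size());
  for (std::size_t pos : mask)
    // Positions >= v1.size() select from v2, mirroring `extPos -= v1Dim`.
    result.push_back(pos < v1.size() ? v1[pos] : v2[pos - v1.size()]);
  return result;
}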
@@ -308,7 +312,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto adaptor = vector::ExtractElementOpOperandAdaptor(operands);
auto extractEltOp = cast<vector::ExtractElementOp>(op);
@@ -333,7 +337,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto adaptor = vector::ExtractOpOperandAdaptor(operands);
@@ -349,7 +353,7 @@ public:
// One-shot extraction of vector from array (only requires extractvalue).
if (resultType.isa<VectorType>()) {
- Value *extracted = rewriter.create<LLVM::ExtractValueOp>(
+ ValuePtr extracted = rewriter.create<LLVM::ExtractValueOp>(
loc, llvmResultType, adaptor.vector(), positionArrayAttr);
rewriter.replaceOp(op, extracted);
return matchSuccess();
@@ -357,7 +361,7 @@ public:
// Potential extraction of 1-D vector from array.
auto *context = op->getContext();
- Value *extracted = adaptor.vector();
+ ValuePtr extracted = adaptor.vector();
auto positionAttrs = positionArrayAttr.getValue();
if (positionAttrs.size() > 1) {
auto oneDVectorType = reducedVectorTypeBack(vectorType);
@@ -388,7 +392,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto adaptor = vector::InsertElementOpOperandAdaptor(operands);
auto insertEltOp = cast<vector::InsertElementOp>(op);
@@ -413,7 +417,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto adaptor = vector::InsertOpOperandAdaptor(operands);
@@ -429,7 +433,7 @@ public:
// One-shot insertion of a vector into an array (only requires insertvalue).
if (sourceType.isa<VectorType>()) {
- Value *inserted = rewriter.create<LLVM::InsertValueOp>(
+ ValuePtr inserted = rewriter.create<LLVM::InsertValueOp>(
loc, llvmResultType, adaptor.dest(), adaptor.source(),
positionArrayAttr);
rewriter.replaceOp(op, inserted);
@@ -438,7 +442,7 @@ public:
// Potential extraction of 1-D vector from array.
auto *context = op->getContext();
- Value *extracted = adaptor.dest();
+ ValuePtr extracted = adaptor.dest();
auto positionAttrs = positionArrayAttr.getValue();
auto position = positionAttrs.back().cast<IntegerAttr>();
auto oneDVectorType = destVectorType;
@@ -454,7 +458,7 @@ public:
// Insertion of an element into a 1-D LLVM vector.
auto i64Type = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
- Value *inserted = rewriter.create<LLVM::InsertElementOp>(
+ ValuePtr inserted = rewriter.create<LLVM::InsertElementOp>(
loc, lowering.convertType(oneDVectorType), extracted, adaptor.source(),
constant);
@@ -480,7 +484,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
auto adaptor = vector::OuterProductOpOperandAdaptor(operands);
@@ -491,10 +495,10 @@ public:
auto rankRHS = vRHS.getUnderlyingType()->getVectorNumElements();
auto llvmArrayOfVectType = lowering.convertType(
cast<vector::OuterProductOp>(op).getResult()->getType());
- Value *desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayOfVectType);
- Value *a = adaptor.lhs(), *b = adaptor.rhs();
- Value *acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front();
- SmallVector<Value *, 8> lhs, accs;
+ ValuePtr desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayOfVectType);
+ ValuePtr a = adaptor.lhs(), b = adaptor.rhs();
+ ValuePtr acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front();
+ SmallVector<ValuePtr, 8> lhs, accs;
lhs.reserve(rankLHS);
accs.reserve(rankLHS);
for (unsigned d = 0, e = rankLHS; d < e; ++d) {
@@ -502,7 +506,7 @@ public:
auto attr = rewriter.getI32IntegerAttr(d);
SmallVector<Attribute, 4> bcastAttr(rankRHS, attr);
auto bcastArrayAttr = ArrayAttr::get(bcastAttr, ctx);
- Value *aD = nullptr, *accD = nullptr;
+ ValuePtr aD = nullptr, accD = nullptr;
// 1. Broadcast the element a[d] into vector aD.
aD = rewriter.create<LLVM::ShuffleVectorOp>(loc, a, a, bcastArrayAttr);
// 2. If acc is present, extract 1-d vector acc[d] into accD.
@@ -510,7 +514,7 @@ public:
accD = rewriter.create<LLVM::ExtractValueOp>(
loc, vRHS, acc, rewriter.getI64ArrayAttr(d));
// 3. Compute aD outer b (plus accD, if relevant).
- Value *aOuterbD =
+ ValuePtr aOuterbD =
accD ? rewriter.create<LLVM::FMulAddOp>(loc, vRHS, aD, b, accD)
.getResult()
: rewriter.create<LLVM::FMulOp>(loc, aD, b).getResult();
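The loop above expands the outer product one lhs element at a time: broadcast lhs[d], multiply by rhs, and fold in the accumulator row when one is present (FMulAddOp) or not (FMulOp). A minimal sketch of the computed result in plain C++ (illustrative, not the lowering itself):

#include <cstddef>
#include <vector>

std::vector<std::vector<float>>
outerProduct(const std::vector<float> &lhs, const std::vector<float> &rhs,
             const std::vector<std::vector<float>> *acc = nullptr) {
  std::vector<std::vector<float>> result(lhs.size(),
                                         std::vector<float>(rhs.size()));
  for (std::size_t d = 0; d < lhs.size(); ++d)
    for (std::size_t j = 0; j < rhs.size(); ++j)
      result[d][j] = lhs[d] * rhs[j] + (acc ? (*acc)[d][j] : 0.0f);
  return result;
}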
@@ -532,7 +536,7 @@ public:
typeConverter) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto loc = op->getLoc();
vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
@@ -581,12 +585,12 @@ public:
auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
Type llvmTargetElementTy = desc.getElementType();
// Set allocated ptr.
- Value *allocated = sourceMemRef.allocatedPtr(rewriter, loc);
+ ValuePtr allocated = sourceMemRef.allocatedPtr(rewriter, loc);
allocated =
rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
desc.setAllocatedPtr(rewriter, loc, allocated);
// Set aligned ptr.
- Value *ptr = sourceMemRef.alignedPtr(rewriter, loc);
+ ValuePtr ptr = sourceMemRef.alignedPtr(rewriter, loc);
ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
desc.setAlignedPtr(rewriter, loc, ptr);
// Fill offset 0.
@@ -632,7 +636,7 @@ public:
// TODO(ajcbik): rely solely on libc in future? something else?
//
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto printOp = cast<vector::PrintOp>(op);
auto adaptor = vector::PrintOpOperandAdaptor(operands);
@@ -662,7 +666,7 @@ public:
private:
void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
- Value *value, VectorType vectorType, Operation *printer,
+ ValuePtr value, VectorType vectorType, Operation *printer,
int64_t rank) const {
Location loc = op->getLoc();
if (rank == 0) {
@@ -678,7 +682,7 @@ private:
rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
auto llvmType = lowering.convertType(
rank > 1 ? reducedType : vectorType.getElementType());
- Value *nestedVal =
+ ValuePtr nestedVal =
extractOne(rewriter, lowering, loc, value, llvmType, rank, d);
emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
if (d != dim - 1)
diff --git a/mlir/lib/Dialect/AffineOps/AffineOps.cpp b/mlir/lib/Dialect/AffineOps/AffineOps.cpp
index ef4060d4302..3a21de389c7 100644
--- a/mlir/lib/Dialect/AffineOps/AffineOps.cpp
+++ b/mlir/lib/Dialect/AffineOps/AffineOps.cpp
@@ -115,8 +115,8 @@ static bool isFunctionRegion(Region *region) {
/// A utility function to check if a value is defined at the top level of a
/// function. A value of index type defined at the top level is always a valid
/// symbol.
-bool mlir::isTopLevelValue(Value *value) {
- if (auto *arg = dyn_cast<BlockArgument>(value))
+bool mlir::isTopLevelValue(ValuePtr value) {
+ if (auto arg = dyn_cast<BlockArgument>(value))
return isFunctionRegion(arg->getOwner()->getParent());
return isFunctionRegion(value->getDefiningOp()->getParentRegion());
}
@@ -124,7 +124,7 @@ bool mlir::isTopLevelValue(Value *value) {
// Value can be used as a dimension id if it is valid as a symbol, or
// it is an induction variable, or it is a result of affine apply operation
// with dimension id arguments.
-bool mlir::isValidDim(Value *value) {
+bool mlir::isValidDim(ValuePtr value) {
// The value must be an index type.
if (!value->getType().isIndex())
return false;
@@ -184,7 +184,7 @@ static bool isDimOpValidSymbol(DimOp dimOp) {
// the top level, or it is a result of affine apply operation with symbol
// arguments, or a result of the dim op on a memref satisfying certain
// constraints.
-bool mlir::isValidSymbol(Value *value) {
+bool mlir::isValidSymbol(ValuePtr value) {
// The value must be an index type.
if (!value->getType().isIndex())
return false;
@@ -207,7 +207,7 @@ bool mlir::isValidSymbol(Value *value) {
// Returns true if 'value' is a valid index to an affine operation (e.g.
// affine.load, affine.store, affine.dma_start, affine.dma_wait).
// Returns false otherwise.
-static bool isValidAffineIndexOperand(Value *value) {
+static bool isValidAffineIndexOperand(ValuePtr value) {
return isValidDim(value) || isValidSymbol(value);
}
@@ -221,7 +221,7 @@ static LogicalResult
verifyDimAndSymbolIdentifiers(OpTy &op, Operation::operand_range operands,
unsigned numDims) {
unsigned opIt = 0;
- for (auto *operand : operands) {
+ for (auto operand : operands) {
if (opIt++ < numDims) {
if (!isValidDim(operand))
return op.emitOpError("operand cannot be used as a dimension id");
@@ -306,14 +306,14 @@ LogicalResult AffineApplyOp::verify() {
// its operands are valid dimension ids.
bool AffineApplyOp::isValidDim() {
return llvm::all_of(getOperands(),
- [](Value *op) { return mlir::isValidDim(op); });
+ [](ValuePtr op) { return mlir::isValidDim(op); });
}
// The result of the affine apply operation can be used as a symbol if all its
// operands are symbols.
bool AffineApplyOp::isValidSymbol() {
return llvm::all_of(getOperands(),
- [](Value *op) { return mlir::isValidSymbol(op); });
+ [](ValuePtr op) { return mlir::isValidSymbol(op); });
}
OpFoldResult AffineApplyOp::fold(ArrayRef<Attribute> operands) {
@@ -333,8 +333,8 @@ OpFoldResult AffineApplyOp::fold(ArrayRef<Attribute> operands) {
return result[0];
}
-AffineDimExpr AffineApplyNormalizer::renumberOneDim(Value *v) {
- DenseMap<Value *, unsigned>::iterator iterPos;
+AffineDimExpr AffineApplyNormalizer::renumberOneDim(ValuePtr v) {
+ DenseMap<ValuePtr, unsigned>::iterator iterPos;
bool inserted = false;
std::tie(iterPos, inserted) =
dimValueToPosition.insert(std::make_pair(v, dimValueToPosition.size()));
@@ -347,7 +347,7 @@ AffineDimExpr AffineApplyNormalizer::renumberOneDim(Value *v) {
AffineMap AffineApplyNormalizer::renumber(const AffineApplyNormalizer &other) {
SmallVector<AffineExpr, 8> dimRemapping;
- for (auto *v : other.reorderedDims) {
+ for (auto v : other.reorderedDims) {
auto kvp = other.dimValueToPosition.find(v);
if (dimRemapping.size() <= kvp->second)
dimRemapping.resize(kvp->second + 1);
@@ -371,7 +371,7 @@ AffineMap AffineApplyNormalizer::renumber(const AffineApplyNormalizer &other) {
// Gather the positions of the operands that are produced by an AffineApplyOp.
static llvm::SetVector<unsigned>
-indicesFromAffineApplyOp(ArrayRef<Value *> operands) {
+indicesFromAffineApplyOp(ArrayRef<ValuePtr> operands) {
llvm::SetVector<unsigned> res;
for (auto en : llvm::enumerate(operands))
if (isa_and_nonnull<AffineApplyOp>(en.value()->getDefiningOp()))
@@ -393,13 +393,13 @@ indicesFromAffineApplyOp(ArrayRef<Value *> operands) {
// results in better simplifications and foldings. But we should evaluate
// whether this behavior is what we really want after using more.
static AffineMap promoteComposedSymbolsAsDims(AffineMap map,
- ArrayRef<Value *> symbols) {
+ ArrayRef<ValuePtr> symbols) {
if (symbols.empty()) {
return map;
}
// Sanity check on symbols.
- for (auto *sym : symbols) {
+ for (auto sym : symbols) {
assert(isValidSymbol(sym) && "Expected only valid symbols");
(void)sym;
}
@@ -446,7 +446,7 @@ static AffineMap promoteComposedSymbolsAsDims(AffineMap map,
/// `(d0)[s0, s1] -> (d0 + s0 + s1)`.
///
/// The result is only equivalent to `(d0)[s0] -> (d0 + 2 * s0)` when
-/// applied to the same mlir::Value* for both s0 and s1.
+/// applied to the same mlir::Value for both s0 and s1.
/// As a consequence mathematical composition of AffineMap always concatenates
/// symbols.
///
@@ -462,7 +462,7 @@ static AffineMap promoteComposedSymbolsAsDims(AffineMap map,
/// benefit potentially big: simpler and more maintainable code for a
/// non-trivial, recursive, procedure.
AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
- ArrayRef<Value *> operands)
+ ArrayRef<ValuePtr> operands)
: AffineApplyNormalizer() {
static_assert(kMaxAffineApplyDepth > 0, "kMaxAffineApplyDepth must be > 0");
assert(map.getNumInputs() == operands.size() &&
@@ -495,7 +495,7 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
if (!furtherCompose) {
// 1. Only dispatch dims or symbols.
for (auto en : llvm::enumerate(operands)) {
- auto *t = en.value();
+ auto t = en.value();
assert(t->getType().isIndex());
bool isDim = (en.index() < map.getNumDims());
if (isDim) {
@@ -511,14 +511,14 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
assert(numDimsBeforeRewrite <= operands.size());
// 2. Compose AffineApplyOps and dispatch dims or symbols.
for (unsigned i = 0, e = operands.size(); i < e; ++i) {
- auto *t = operands[i];
+ auto t = operands[i];
auto affineApply = dyn_cast_or_null<AffineApplyOp>(t->getDefiningOp());
if (affineApply) {
// a. Compose affine.apply operations.
LLVM_DEBUG(affineApply.getOperation()->print(
dbgs() << "\nCompose AffineApplyOp recursively: "));
AffineMap affineApplyMap = affineApply.getAffineMap();
- SmallVector<Value *, 8> affineApplyOperands(
+ SmallVector<ValuePtr, 8> affineApplyOperands(
affineApply.getOperands().begin(), affineApply.getOperands().end());
AffineApplyNormalizer normalizer(affineApplyMap, affineApplyOperands);
@@ -569,8 +569,8 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
LLVM_DEBUG(dbgs() << "\n");
}
-void AffineApplyNormalizer::normalize(AffineMap *otherMap,
- SmallVectorImpl<Value *> *otherOperands) {
+void AffineApplyNormalizer::normalize(
+ AffineMap *otherMap, SmallVectorImpl<ValuePtr> *otherOperands) {
AffineApplyNormalizer other(*otherMap, *otherOperands);
*otherMap = renumber(other);
@@ -584,7 +584,7 @@ void AffineApplyNormalizer::normalize(AffineMap *otherMap,
/// on `map` and `operands` without creating an AffineApplyOp that needs to be
/// immediately deleted.
static void composeAffineMapAndOperands(AffineMap *map,
- SmallVectorImpl<Value *> *operands) {
+ SmallVectorImpl<ValuePtr> *operands) {
AffineApplyNormalizer normalizer(*map, *operands);
auto normalizedMap = normalizer.getAffineMap();
auto normalizedOperands = normalizer.getOperands();
@@ -595,8 +595,8 @@ static void composeAffineMapAndOperands(AffineMap *map,
}
void mlir::fullyComposeAffineMapAndOperands(
- AffineMap *map, SmallVectorImpl<Value *> *operands) {
- while (llvm::any_of(*operands, [](Value *v) {
+ AffineMap *map, SmallVectorImpl<ValuePtr> *operands) {
+ while (llvm::any_of(*operands, [](ValuePtr v) {
return isa_and_nonnull<AffineApplyOp>(v->getDefiningOp());
})) {
composeAffineMapAndOperands(map, operands);
@@ -605,9 +605,9 @@ void mlir::fullyComposeAffineMapAndOperands(
AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
AffineMap map,
- ArrayRef<Value *> operands) {
+ ArrayRef<ValuePtr> operands) {
AffineMap normalizedMap = map;
- SmallVector<Value *, 8> normalizedOperands(operands.begin(), operands.end());
+ SmallVector<ValuePtr, 8> normalizedOperands(operands.begin(), operands.end());
composeAffineMapAndOperands(&normalizedMap, &normalizedOperands);
assert(normalizedMap);
return b.create<AffineApplyOp>(loc, normalizedMap, normalizedOperands);
@@ -617,7 +617,7 @@ AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
// canonicalizes dims that are valid symbols into actual symbols.
template <class MapOrSet>
static void canonicalizePromotedSymbols(MapOrSet *mapOrSet,
- SmallVectorImpl<Value *> *operands) {
+ SmallVectorImpl<ValuePtr> *operands) {
if (!mapOrSet || operands->empty())
return;
@@ -625,9 +625,9 @@ static void canonicalizePromotedSymbols(MapOrSet *mapOrSet,
"map/set inputs must match number of operands");
auto *context = mapOrSet->getContext();
- SmallVector<Value *, 8> resultOperands;
+ SmallVector<ValuePtr, 8> resultOperands;
resultOperands.reserve(operands->size());
- SmallVector<Value *, 8> remappedSymbols;
+ SmallVector<ValuePtr, 8> remappedSymbols;
remappedSymbols.reserve(operands->size());
unsigned nextDim = 0;
unsigned nextSym = 0;
@@ -661,7 +661,7 @@ static void canonicalizePromotedSymbols(MapOrSet *mapOrSet,
template <class MapOrSet>
static void
canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
- SmallVectorImpl<Value *> *operands) {
+ SmallVectorImpl<ValuePtr> *operands) {
static_assert(std::is_same<MapOrSet, AffineMap>::value ||
std::is_same<MapOrSet, IntegerSet>::value,
"Argument must be either of AffineMap or IntegerSet type");
@@ -686,10 +686,10 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
auto *context = mapOrSet->getContext();
- SmallVector<Value *, 8> resultOperands;
+ SmallVector<ValuePtr, 8> resultOperands;
resultOperands.reserve(operands->size());
- llvm::SmallDenseMap<Value *, AffineExpr, 8> seenDims;
+ llvm::SmallDenseMap<ValuePtr, AffineExpr, 8> seenDims;
SmallVector<AffineExpr, 8> dimRemapping(mapOrSet->getNumDims());
unsigned nextDim = 0;
for (unsigned i = 0, e = mapOrSet->getNumDims(); i != e; ++i) {
@@ -705,7 +705,7 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
}
}
}
- llvm::SmallDenseMap<Value *, AffineExpr, 8> seenSymbols;
+ llvm::SmallDenseMap<ValuePtr, AffineExpr, 8> seenSymbols;
SmallVector<AffineExpr, 8> symRemapping(mapOrSet->getNumSymbols());
unsigned nextSym = 0;
for (unsigned i = 0, e = mapOrSet->getNumSymbols(); i != e; ++i) {
@@ -738,12 +738,12 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
}
void mlir::canonicalizeMapAndOperands(AffineMap *map,
- SmallVectorImpl<Value *> *operands) {
+ SmallVectorImpl<ValuePtr> *operands) {
canonicalizeMapOrSetAndOperands<AffineMap>(map, operands);
}
void mlir::canonicalizeSetAndOperands(IntegerSet *set,
- SmallVectorImpl<Value *> *operands) {
+ SmallVectorImpl<ValuePtr> *operands) {
canonicalizeMapOrSetAndOperands<IntegerSet>(set, operands);
}
@@ -758,7 +758,7 @@ struct SimplifyAffineOp : public OpRewritePattern<AffineOpTy> {
/// Replace the affine op with another instance of it with the supplied
/// map and mapOperands.
void replaceAffineOp(PatternRewriter &rewriter, AffineOpTy affineOp,
- AffineMap map, ArrayRef<Value *> mapOperands) const;
+ AffineMap map, ArrayRef<ValuePtr> mapOperands) const;
PatternMatchResult matchAndRewrite(AffineOpTy affineOp,
PatternRewriter &rewriter) const override {
@@ -770,7 +770,7 @@ struct SimplifyAffineOp : public OpRewritePattern<AffineOpTy> {
auto map = affineOp.getAffineMap();
AffineMap oldMap = map;
auto oldOperands = affineOp.getMapOperands();
- SmallVector<Value *, 8> resultOperands(oldOperands);
+ SmallVector<ValuePtr, 8> resultOperands(oldOperands);
composeAffineMapAndOperands(&map, &resultOperands);
if (map == oldMap && std::equal(oldOperands.begin(), oldOperands.end(),
resultOperands.begin()))
@@ -786,14 +786,14 @@ struct SimplifyAffineOp : public OpRewritePattern<AffineOpTy> {
template <>
void SimplifyAffineOp<AffineLoadOp>::replaceAffineOp(
PatternRewriter &rewriter, AffineLoadOp load, AffineMap map,
- ArrayRef<Value *> mapOperands) const {
+ ArrayRef<ValuePtr> mapOperands) const {
rewriter.replaceOpWithNewOp<AffineLoadOp>(load, load.getMemRef(), map,
mapOperands);
}
template <>
void SimplifyAffineOp<AffinePrefetchOp>::replaceAffineOp(
PatternRewriter &rewriter, AffinePrefetchOp prefetch, AffineMap map,
- ArrayRef<Value *> mapOperands) const {
+ ArrayRef<ValuePtr> mapOperands) const {
rewriter.replaceOpWithNewOp<AffinePrefetchOp>(
prefetch, prefetch.memref(), map, mapOperands,
prefetch.localityHint().getZExtValue(), prefetch.isWrite(),
@@ -802,14 +802,14 @@ void SimplifyAffineOp<AffinePrefetchOp>::replaceAffineOp(
template <>
void SimplifyAffineOp<AffineStoreOp>::replaceAffineOp(
PatternRewriter &rewriter, AffineStoreOp store, AffineMap map,
- ArrayRef<Value *> mapOperands) const {
+ ArrayRef<ValuePtr> mapOperands) const {
rewriter.replaceOpWithNewOp<AffineStoreOp>(
store, store.getValueToStore(), store.getMemRef(), map, mapOperands);
}
template <>
void SimplifyAffineOp<AffineApplyOp>::replaceAffineOp(
PatternRewriter &rewriter, AffineApplyOp apply, AffineMap map,
- ArrayRef<Value *> mapOperands) const {
+ ArrayRef<ValuePtr> mapOperands) const {
rewriter.replaceOpWithNewOp<AffineApplyOp>(apply, map, mapOperands);
}
} // end anonymous namespace.
@@ -844,12 +844,12 @@ static LogicalResult foldMemRefCast(Operation *op) {
// TODO(b/133776335) Check that map operands are loop IVs or symbols.
void AffineDmaStartOp::build(Builder *builder, OperationState &result,
- Value *srcMemRef, AffineMap srcMap,
- ValueRange srcIndices, Value *destMemRef,
+ ValuePtr srcMemRef, AffineMap srcMap,
+ ValueRange srcIndices, ValuePtr destMemRef,
AffineMap dstMap, ValueRange destIndices,
- Value *tagMemRef, AffineMap tagMap,
- ValueRange tagIndices, Value *numElements,
- Value *stride, Value *elementsPerStride) {
+ ValuePtr tagMemRef, AffineMap tagMap,
+ ValueRange tagIndices, ValuePtr numElements,
+ ValuePtr stride, ValuePtr elementsPerStride) {
result.addOperands(srcMemRef);
result.addAttribute(getSrcMapAttrName(), AffineMapAttr::get(srcMap));
result.addOperands(srcIndices);
@@ -980,19 +980,19 @@ LogicalResult AffineDmaStartOp::verify() {
return emitOpError("incorrect number of operands");
}
- for (auto *idx : getSrcIndices()) {
+ for (auto idx : getSrcIndices()) {
if (!idx->getType().isIndex())
return emitOpError("src index to dma_start must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("src index must be a dimension or symbol identifier");
}
- for (auto *idx : getDstIndices()) {
+ for (auto idx : getDstIndices()) {
if (!idx->getType().isIndex())
return emitOpError("dst index to dma_start must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("dst index must be a dimension or symbol identifier");
}
- for (auto *idx : getTagIndices()) {
+ for (auto idx : getTagIndices()) {
if (!idx->getType().isIndex())
return emitOpError("tag index to dma_start must have 'index' type");
if (!isValidAffineIndexOperand(idx))
@@ -1013,8 +1013,8 @@ LogicalResult AffineDmaStartOp::fold(ArrayRef<Attribute> cstOperands,
// TODO(b/133776335) Check that map operands are loop IVs or symbols.
void AffineDmaWaitOp::build(Builder *builder, OperationState &result,
- Value *tagMemRef, AffineMap tagMap,
- ValueRange tagIndices, Value *numElements) {
+ ValuePtr tagMemRef, AffineMap tagMap,
+ ValueRange tagIndices, ValuePtr numElements) {
result.addOperands(tagMemRef);
result.addAttribute(getTagMapAttrName(), AffineMapAttr::get(tagMap));
result.addOperands(tagIndices);
@@ -1023,7 +1023,7 @@ void AffineDmaWaitOp::build(Builder *builder, OperationState &result,
void AffineDmaWaitOp::print(OpAsmPrinter &p) {
p << "affine.dma_wait " << *getTagMemRef() << '[';
- SmallVector<Value *, 2> operands(getTagIndices());
+ SmallVector<ValuePtr, 2> operands(getTagIndices());
p.printAffineMapOfSSAIds(getTagMapAttr(), operands);
p << "], ";
p.printOperand(getNumElements());
@@ -1068,7 +1068,7 @@ ParseResult AffineDmaWaitOp::parse(OpAsmParser &parser,
LogicalResult AffineDmaWaitOp::verify() {
if (!getOperand(0)->getType().isa<MemRefType>())
return emitOpError("expected DMA tag to be of memref type");
- for (auto *idx : getTagIndices()) {
+ for (auto idx : getTagIndices()) {
if (!idx->getType().isIndex())
return emitOpError("index to dma_wait must have 'index' type");
if (!isValidAffineIndexOperand(idx))
@@ -1368,7 +1368,7 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) {
SmallVector<Attribute, 8> operandConstants;
auto boundOperands =
lower ? forOp.getLowerBoundOperands() : forOp.getUpperBoundOperands();
- for (auto *operand : boundOperands) {
+ for (auto operand : boundOperands) {
Attribute operandCst;
matchPattern(operand, m_Constant(&operandCst));
operandConstants.push_back(operandCst);
@@ -1408,8 +1408,8 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) {
/// Canonicalize the bounds of the given loop.
static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
- SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
auto lbMap = forOp.getLowerBoundMap();
auto ubMap = forOp.getUpperBoundMap();
@@ -1474,7 +1474,7 @@ void AffineForOp::setLowerBound(ValueRange lbOperands, AffineMap map) {
assert(lbOperands.size() == map.getNumInputs());
assert(map.getNumResults() >= 1 && "bound map has at least one result");
- SmallVector<Value *, 4> newOperands(lbOperands.begin(), lbOperands.end());
+ SmallVector<ValuePtr, 4> newOperands(lbOperands.begin(), lbOperands.end());
auto ubOperands = getUpperBoundOperands();
newOperands.append(ubOperands.begin(), ubOperands.end());
@@ -1487,7 +1487,7 @@ void AffineForOp::setUpperBound(ValueRange ubOperands, AffineMap map) {
assert(ubOperands.size() == map.getNumInputs());
assert(map.getNumResults() >= 1 && "bound map has at least one result");
- SmallVector<Value *, 4> newOperands(getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> newOperands(getLowerBoundOperands());
newOperands.append(ubOperands.begin(), ubOperands.end());
getOperation()->setOperands(newOperands);
@@ -1553,7 +1553,7 @@ bool AffineForOp::matchingBoundOperandList() {
unsigned numOperands = lbMap.getNumInputs();
for (unsigned i = 0, e = lbMap.getNumInputs(); i < e; i++) {
- // Compare Value *'s.
+    // Compare ValuePtr's.
if (getOperand(i) != getOperand(numOperands + i))
return false;
}
@@ -1562,7 +1562,7 @@ bool AffineForOp::matchingBoundOperandList() {
Region &AffineForOp::getLoopBody() { return region(); }
-bool AffineForOp::isDefinedOutsideOfLoop(Value *value) {
+bool AffineForOp::isDefinedOutsideOfLoop(ValuePtr value) {
return !region().isAncestor(value->getParentRegion());
}
@@ -1573,14 +1573,14 @@ LogicalResult AffineForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
}
/// Returns if the provided value is the induction variable of a AffineForOp.
-bool mlir::isForInductionVar(Value *val) {
+bool mlir::isForInductionVar(ValuePtr val) {
return getForInductionVarOwner(val) != AffineForOp();
}
/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, then return nullptr.
-AffineForOp mlir::getForInductionVarOwner(Value *val) {
- auto *ivArg = dyn_cast<BlockArgument>(val);
+AffineForOp mlir::getForInductionVarOwner(ValuePtr val) {
+ auto ivArg = dyn_cast<BlockArgument>(val);
if (!ivArg || !ivArg->getOwner())
return AffineForOp();
auto *containingInst = ivArg->getOwner()->getParent()->getParentOp();
@@ -1590,7 +1590,7 @@ AffineForOp mlir::getForInductionVarOwner(Value *val) {
/// Extracts the induction variables from a list of AffineForOps and returns
/// them.
void mlir::extractForInductionVars(ArrayRef<AffineForOp> forInsts,
- SmallVectorImpl<Value *> *ivs) {
+ SmallVectorImpl<ValuePtr> *ivs) {
ivs->reserve(forInsts.size());
for (auto forInst : forInsts)
ivs->push_back(forInst.getInductionVar());
@@ -1729,7 +1729,7 @@ void AffineIfOp::build(Builder *builder, OperationState &result, IntegerSet set,
LogicalResult AffineIfOp::fold(ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &) {
auto set = getIntegerSet();
- SmallVector<Value *, 4> operands(getOperands());
+ SmallVector<ValuePtr, 4> operands(getOperands());
canonicalizeSetAndOperands(&set, &operands);
// Any canonicalization change always leads to either a reduction in the
@@ -1759,7 +1759,8 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
}
void AffineLoadOp::build(Builder *builder, OperationState &result,
- Value *memref, AffineMap map, ValueRange mapOperands) {
+ ValuePtr memref, AffineMap map,
+ ValueRange mapOperands) {
assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info");
result.addOperands(memref);
result.addOperands(mapOperands);
@@ -1769,7 +1770,7 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
}
void AffineLoadOp::build(Builder *builder, OperationState &result,
- Value *memref, ValueRange indices) {
+ ValuePtr memref, ValueRange indices) {
auto memrefType = memref->getType().cast<MemRefType>();
auto rank = memrefType.getRank();
// Create identity map for memrefs with at least one dimension or () -> ()
@@ -1825,7 +1826,7 @@ LogicalResult AffineLoadOp::verify() {
"expects the number of subscripts to be equal to memref rank");
}
- for (auto *idx : getMapOperands()) {
+ for (auto idx : getMapOperands()) {
if (!idx->getType().isIndex())
return emitOpError("index to load must have 'index' type");
if (!isValidAffineIndexOperand(idx))
@@ -1851,7 +1852,7 @@ OpFoldResult AffineLoadOp::fold(ArrayRef<Attribute> cstOperands) {
//===----------------------------------------------------------------------===//
void AffineStoreOp::build(Builder *builder, OperationState &result,
- Value *valueToStore, Value *memref, AffineMap map,
+ ValuePtr valueToStore, ValuePtr memref, AffineMap map,
ValueRange mapOperands) {
assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info");
result.addOperands(valueToStore);
@@ -1862,7 +1863,7 @@ void AffineStoreOp::build(Builder *builder, OperationState &result,
// Use identity map.
void AffineStoreOp::build(Builder *builder, OperationState &result,
- Value *valueToStore, Value *memref,
+ ValuePtr valueToStore, ValuePtr memref,
ValueRange indices) {
auto memrefType = memref->getType().cast<MemRefType>();
auto rank = memrefType.getRank();
@@ -1923,7 +1924,7 @@ LogicalResult AffineStoreOp::verify() {
"expects the number of subscripts to be equal to memref rank");
}
- for (auto *idx : getMapOperands()) {
+ for (auto idx : getMapOperands()) {
if (!idx->getType().isIndex())
return emitOpError("index to store must have 'index' type");
if (!isValidAffineIndexOperand(idx))
@@ -2072,7 +2073,7 @@ void print(OpAsmPrinter &p, AffinePrefetchOp op) {
p << AffinePrefetchOp::getOperationName() << " " << *op.memref() << '[';
AffineMapAttr mapAttr = op.getAttrOfType<AffineMapAttr>(op.getMapAttrName());
if (mapAttr) {
- SmallVector<Value *, 2> operands(op.getMapOperands());
+ SmallVector<ValuePtr, 2> operands(op.getMapOperands());
p.printAffineMapOfSSAIds(mapAttr, operands);
}
p << ']' << ", " << (op.isWrite() ? "write" : "read") << ", "
@@ -2099,7 +2100,7 @@ LogicalResult verify(AffinePrefetchOp op) {
return op.emitOpError("too few operands");
}
- for (auto *idx : op.getMapOperands()) {
+ for (auto idx : op.getMapOperands()) {
if (!isValidAffineIndexOperand(idx))
return op.emitOpError("index must be a dimension or symbol identifier");
}
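
Editorial aside (not part of the patch): the edits in this file are a mechanical respelling of pointer-typed SSA values, under the assumption that ValuePtr is introduced elsewhere in the change as a plain alias for Value *. A minimal self-contained sketch of that pattern, using stand-in types:

#include <cassert>
#include <vector>

struct Value { int id; };      // stand-in for mlir::Value
using ValuePtr = Value *;      // assumed shape of the alias added by the patch

// Before: the helper would take std::vector<Value *>; after: the alias.
// Both spell the same raw pointer, so call sites change mechanically, exactly
// like the lambdas and SmallVector element types above.
static bool allNonNull(const std::vector<ValuePtr> &operands) {
  for (ValuePtr v : operands)  // was: for (Value *v : operands)
    if (!v)
      return false;
  return true;
}

int main() {
  Value a{0}, b{1};
  std::vector<ValuePtr> ops{&a, &b};
  assert(allNonNull(ops));
  return 0;
}
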
diff --git a/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp b/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp
index 3982a6a4713..e1951ff900b 100644
--- a/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp
+++ b/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp
@@ -46,9 +46,9 @@ struct LowerUniformCastsPass : public FunctionPass<LowerUniformCastsPass> {
// Dequantize
//===----------------------------------------------------------------------===//
-static Value *emitUniformPerLayerDequantize(Location loc, Value *input,
- UniformQuantizedType elementType,
- PatternRewriter &rewriter) {
+static ValuePtr emitUniformPerLayerDequantize(Location loc, ValuePtr input,
+ UniformQuantizedType elementType,
+ PatternRewriter &rewriter) {
// Pre-conditions.
if (!elementType.isSigned()) {
// TODO: Support unsigned storage type.
@@ -71,7 +71,7 @@ static Value *emitUniformPerLayerDequantize(Location loc, Value *input,
// Apply zero-point offset.
if (elementType.getZeroPoint() != 0) {
- Value *negZeroPointConst = rewriter.create<ConstantOp>(
+ ValuePtr negZeroPointConst = rewriter.create<ConstantOp>(
loc, broadcastScalarConstIntValue(intermediateType,
-elementType.getZeroPoint()));
input = rewriter.create<AddIOp>(loc, input, negZeroPointConst);
@@ -81,14 +81,14 @@ static Value *emitUniformPerLayerDequantize(Location loc, Value *input,
input = rewriter.create<ConvertISToFOp>(loc, realType, input);
// Mul by scale.
- Value *scaleConst = rewriter.create<ConstantOp>(
+ ValuePtr scaleConst = rewriter.create<ConstantOp>(
loc, broadcastScalarConstFloatValue(realType,
APFloat(elementType.getScale())));
return rewriter.create<MulFOp>(loc, input, scaleConst);
}
-static Value *
-emitUniformPerAxisDequantize(Location loc, Value *input,
+static ValuePtr
+emitUniformPerAxisDequantize(Location loc, ValuePtr input,
UniformQuantizedPerAxisType elementType,
PatternRewriter &rewriter) {
// TODO: Support per-axis dequantize.
@@ -97,8 +97,8 @@ emitUniformPerAxisDequantize(Location loc, Value *input,
return nullptr;
}
-static Value *emitDequantize(Location loc, Value *input,
- PatternRewriter &rewriter) {
+static ValuePtr emitDequantize(Location loc, ValuePtr input,
+ PatternRewriter &rewriter) {
Type inputType = input->getType();
QuantizedType qElementType =
QuantizedType::getQuantizedElementType(inputType);
@@ -133,7 +133,7 @@ struct UniformDequantizePattern : public OpRewritePattern<DequantizeCastOp> {
return matchFailure();
}
- Value *dequantizedValue = emitDequantize(op.getLoc(), op.arg(), rewriter);
+ ValuePtr dequantizedValue = emitDequantize(op.getLoc(), op.arg(), rewriter);
if (!dequantizedValue) {
return matchFailure();
}
@@ -170,14 +170,14 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info,
castElementType(info.resultStorageType, intermediateElementType);
// Cast operands to storage type.
- Value *lhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.lhsStorageType, info.lhs)
- .getResult();
- Value *rhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.rhsStorageType, info.rhs)
- .getResult();
+ ValuePtr lhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.lhsStorageType, info.lhs)
+ .getResult();
+ ValuePtr rhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.rhsStorageType, info.rhs)
+ .getResult();
// Cast to the intermediate sized type.
lhsValue = rewriter.create<ConvertISOp>(info.op->getLoc(), intermediateType,
@@ -186,7 +186,7 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info,
rhsValue);
// Add.
- Value *resultValue =
+ ValuePtr resultValue =
rewriter.create<AddIOp>(info.op->getLoc(), lhsValue, rhsValue);
// Zero point offset adjustment.
@@ -194,7 +194,7 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info,
// zpOffset = -zp
int zpOffset = -1 * info.resultType.getZeroPoint();
if (zpOffset != 0) {
- Value *zpOffsetConst = rewriter.create<ConstantOp>(
+ ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(),
broadcastScalarConstIntValue(intermediateType, zpOffset));
resultValue =
@@ -246,14 +246,14 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
castElementType(info.resultStorageType, intermediateElementType);
// Cast operands to storage type.
- Value *lhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.lhsStorageType, info.lhs)
- .getResult();
- Value *rhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.rhsStorageType, info.rhs)
- .getResult();
+ ValuePtr lhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.lhsStorageType, info.lhs)
+ .getResult();
+ ValuePtr rhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.rhsStorageType, info.rhs)
+ .getResult();
// Cast to the intermediate sized type.
lhsValue = rewriter.create<ConvertISOp>(info.op->getLoc(), intermediateType,
@@ -263,7 +263,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
// Apply argument zeroPoints.
if (info.lhsType.getZeroPoint() != 0) {
- Value *zpOffsetConst = rewriter.create<ConstantOp>(
+ ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(), broadcastScalarConstIntValue(
intermediateType, -info.lhsType.getZeroPoint()));
lhsValue =
@@ -271,7 +271,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
}
if (info.rhsType.getZeroPoint() != 0) {
- Value *zpOffsetConst = rewriter.create<ConstantOp>(
+ ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(), broadcastScalarConstIntValue(
intermediateType, -info.rhsType.getZeroPoint()));
rhsValue =
@@ -279,7 +279,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
}
// Mul.
- Value *resultValue =
+ ValuePtr resultValue =
rewriter.create<MulIOp>(info.op->getLoc(), lhsValue, rhsValue);
// Scale output.
@@ -293,7 +293,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
// Zero point offset adjustment.
if (info.resultType.getZeroPoint() != 0) {
- Value *zpOffsetConst = rewriter.create<ConstantOp>(
+ ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(),
broadcastScalarConstIntValue(intermediateType,
info.resultType.getZeroPoint()));
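
For reference, the dequantize lowering rewritten above (zero-point offset, integer-to-float conversion, multiply by scale) computes the usual uniform-quantization formula real = scale * (storage - zeroPoint). A self-contained scalar sketch of the same arithmetic (hypothetical helper, not taken from the patch):

#include <cassert>
#include <cstdint>

// Mirrors the AddIOp(-zeroPoint) / ConvertISToFOp / MulFOp sequence emitted by
// emitUniformPerLayerDequantize, but on a single scalar value.
static float dequantizeScalar(int32_t storage, int32_t zeroPoint, float scale) {
  return scale * static_cast<float>(storage - zeroPoint);
}

int main() {
  // zeroPoint = 3, scale = 0.5: a stored 7 dequantizes to 2.0.
  assert(dequantizeScalar(7, 3, 0.5f) == 2.0f);
  return 0;
}
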
diff --git a/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h b/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h
index 955e2ecc88c..57a8422b362 100644
--- a/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h
+++ b/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h
@@ -59,7 +59,7 @@ template <typename F> bool integralLog2(F x, int &log2Result) {
/// Helper class for operating on binary operations where all operands
/// and the result are a UniformQuantizedType.
struct UniformBinaryOpInfo {
- UniformBinaryOpInfo(Operation *op, Value *lhs, Value *rhs,
+ UniformBinaryOpInfo(Operation *op, ValuePtr lhs, ValuePtr rhs,
Optional<APFloat> clampMin, Optional<APFloat> clampMax)
: op(op), lhs(lhs), rhs(rhs), clampMin(clampMin), clampMax(clampMax),
lhsType(getUniformElementType(lhs->getType())),
@@ -128,8 +128,8 @@ struct UniformBinaryOpInfo {
}
Operation *op;
- Value *lhs;
- Value *rhs;
+ ValuePtr lhs;
+ ValuePtr rhs;
Optional<APFloat> clampMin;
Optional<APFloat> clampMax;
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 9c0183eb90f..349c1fa4644 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -145,7 +145,7 @@ static LogicalResult verifyAllReduce(gpu::AllReduceOp allReduce) {
if (!allReduce.body().empty()) {
if (allReduce.body().front().getNumArguments() != 2)
return allReduce.emitError("expected two region arguments");
- for (auto *argument : allReduce.body().front().getArguments()) {
+ for (auto argument : allReduce.body().front().getArguments()) {
if (argument->getType() != allReduce.getType())
return allReduce.emitError("incorrect region argument type");
}
@@ -213,15 +213,15 @@ static ParseResult parseShuffleOp(OpAsmParser &parser, OperationState &state) {
static SmallVector<Type, 4> getValueTypes(ValueRange values) {
SmallVector<Type, 4> types;
types.reserve(values.size());
- for (Value *v : values)
+ for (ValuePtr v : values)
types.push_back(v->getType());
return types;
}
-void LaunchOp::build(Builder *builder, OperationState &result, Value *gridSizeX,
- Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX,
- Value *blockSizeY, Value *blockSizeZ,
- ValueRange operands) {
+void LaunchOp::build(Builder *builder, OperationState &result,
+ ValuePtr gridSizeX, ValuePtr gridSizeY, ValuePtr gridSizeZ,
+ ValuePtr blockSizeX, ValuePtr blockSizeY,
+ ValuePtr blockSizeZ, ValueRange operands) {
// Add grid and block sizes as op operands, followed by the data operands.
result.addOperands(
{gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
@@ -489,22 +489,22 @@ class PropagateConstantBounds : public OpRewritePattern<LaunchOp> {
// and use it instead of passing the value from the parent region. Perform
// the traversal in the inverse order to simplify index arithmetics when
// dropping arguments.
- SmallVector<Value *, 8> operands(launchOp.getKernelOperandValues().begin(),
- launchOp.getKernelOperandValues().end());
- SmallVector<Value *, 8> kernelArgs(launchOp.getKernelArguments().begin(),
- launchOp.getKernelArguments().end());
+ SmallVector<ValuePtr, 8> operands(launchOp.getKernelOperandValues().begin(),
+ launchOp.getKernelOperandValues().end());
+ SmallVector<ValuePtr, 8> kernelArgs(launchOp.getKernelArguments().begin(),
+ launchOp.getKernelArguments().end());
bool found = false;
for (unsigned i = operands.size(); i > 0; --i) {
unsigned index = i - 1;
- Value *operand = operands[index];
+ ValuePtr operand = operands[index];
if (!isa_and_nonnull<ConstantOp>(operand->getDefiningOp())) {
continue;
}
found = true;
- Value *internalConstant =
+ ValuePtr internalConstant =
rewriter.clone(*operand->getDefiningOp())->getResult(0);
- Value *kernelArg = kernelArgs[index];
+ ValuePtr kernelArg = kernelArgs[index];
kernelArg->replaceAllUsesWith(internalConstant);
launchOp.eraseKernelArgument(index);
}
@@ -529,10 +529,10 @@ void LaunchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
//===----------------------------------------------------------------------===//
void LaunchFuncOp::build(Builder *builder, OperationState &result,
- GPUFuncOp kernelFunc, Value *gridSizeX,
- Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX,
- Value *blockSizeY, Value *blockSizeZ,
- ValueRange kernelOperands) {
+ GPUFuncOp kernelFunc, ValuePtr gridSizeX,
+ ValuePtr gridSizeY, ValuePtr gridSizeZ,
+ ValuePtr blockSizeX, ValuePtr blockSizeY,
+ ValuePtr blockSizeZ, ValueRange kernelOperands) {
// Add grid and block sizes as op operands, followed by the data operands.
result.addOperands(
{gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
@@ -565,7 +565,7 @@ StringRef LaunchFuncOp::getKernelModuleName() {
.getRootReference();
}
-Value *LaunchFuncOp::getKernelOperand(unsigned i) {
+ValuePtr LaunchFuncOp::getKernelOperand(unsigned i) {
return getOperation()->getOperand(i + kNumConfigOperands);
}
@@ -728,13 +728,14 @@ static ParseResult parseGPUFuncOp(OpAsmParser &parser, OperationState &result) {
}
static void printAttributions(OpAsmPrinter &p, StringRef keyword,
- ArrayRef<BlockArgument *> values) {
+ ArrayRef<BlockArgumentPtr> values) {
if (values.empty())
return;
p << ' ' << keyword << '(';
- interleaveComma(values, p,
- [&p](BlockArgument *v) { p << *v << " : " << v->getType(); });
+ interleaveComma(values, p, [&p](BlockArgumentPtr v) {
+ p << *v << " : " << v->getType();
+ });
p << ')';
}
@@ -781,9 +782,9 @@ LogicalResult GPUFuncOp::verifyType() {
}
static LogicalResult verifyAttributions(Operation *op,
- ArrayRef<BlockArgument *> attributions,
+ ArrayRef<BlockArgumentPtr> attributions,
unsigned memorySpace) {
- for (Value *v : attributions) {
+ for (ValuePtr v : attributions) {
auto type = v->getType().dyn_cast<MemRefType>();
if (!type)
return op->emitOpError() << "expected memref type in attribution";
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 0a6a5915633..8f5f50e4909 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -31,10 +31,10 @@ using namespace mlir;
template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
- SmallVectorImpl<Value *> &values) {
+ SmallVectorImpl<ValuePtr> &values) {
for (StringRef dim : {"x", "y", "z"}) {
- Value *v = builder.create<OpTy>(loc, builder.getIndexType(),
- builder.getStringAttr(dim));
+ ValuePtr v = builder.create<OpTy>(loc, builder.getIndexType(),
+ builder.getStringAttr(dim));
values.push_back(v);
}
}
@@ -46,7 +46,7 @@ static void injectGpuIndexOperations(Location loc, Region &body) {
OpBuilder builder(loc->getContext());
Block &firstBlock = body.front();
builder.setInsertionPointToStart(&firstBlock);
- SmallVector<Value *, 12> indexOps;
+ SmallVector<ValuePtr, 12> indexOps;
createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
@@ -69,7 +69,7 @@ static gpu::LaunchFuncOp inlineBeneficiaryOps(gpu::GPUFuncOp kernelFunc,
gpu::LaunchFuncOp launch) {
OpBuilder kernelBuilder(kernelFunc.getBody());
auto &firstBlock = kernelFunc.getBody().front();
- SmallVector<Value *, 8> newLaunchArgs;
+ SmallVector<ValuePtr, 8> newLaunchArgs;
BlockAndValueMapping map;
for (int i = 0, e = launch.getNumKernelOperands(); i < e; ++i) {
map.map(launch.getKernelOperand(i), kernelFunc.getArgument(i));
@@ -82,7 +82,7 @@ static gpu::LaunchFuncOp inlineBeneficiaryOps(gpu::GPUFuncOp kernelFunc,
}
// Only inline operations that do not create new arguments.
if (!llvm::all_of(operandOp->getOperands(),
- [map](Value *value) { return map.contains(value); })) {
+ [map](ValuePtr value) { return map.contains(value); })) {
continue;
}
auto clone = kernelBuilder.clone(*operandOp, map);
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 1813b30165f..b94ee335bd2 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -415,7 +415,7 @@ static ParseResult parseCallOp(OpAsmParser &parser, OperationState &result) {
// Expects vector to be of wrapped LLVM vector type and position to be of
// wrapped LLVM i32 type.
void LLVM::ExtractElementOp::build(Builder *b, OperationState &result,
- Value *vector, Value *position,
+ ValuePtr vector, ValuePtr position,
ArrayRef<NamedAttribute> attrs) {
auto wrappedVectorType = vector->getType().cast<LLVM::LLVMType>();
auto llvmType = wrappedVectorType.getVectorElementType();
@@ -681,7 +681,7 @@ static void printBrOp(OpAsmPrinter &p, BrOp &op) {
// attribute-dict?
static ParseResult parseBrOp(OpAsmParser &parser, OperationState &result) {
Block *dest;
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
if (parser.parseSuccessorAndUseList(dest, operands) ||
parser.parseOptionalAttrDict(result.attributes))
return failure();
@@ -708,8 +708,8 @@ static void printCondBrOp(OpAsmPrinter &p, CondBrOp &op) {
static ParseResult parseCondBrOp(OpAsmParser &parser, OperationState &result) {
Block *trueDest;
Block *falseDest;
- SmallVector<Value *, 4> trueOperands;
- SmallVector<Value *, 4> falseOperands;
+ SmallVector<ValuePtr, 4> trueOperands;
+ SmallVector<ValuePtr, 4> falseOperands;
OpAsmParser::OperandType condition;
Builder &builder = parser.getBuilder();
@@ -1066,8 +1066,8 @@ static LogicalResult verify(GlobalOp op) {
//===----------------------------------------------------------------------===//
// Expects vector to be of wrapped LLVM vector type and position to be of
// wrapped LLVM i32 type.
-void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result, Value *v1,
- Value *v2, ArrayAttr mask,
+void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result,
+ ValuePtr v1, ValuePtr v2, ArrayAttr mask,
ArrayRef<NamedAttribute> attrs) {
auto wrappedContainerType1 = v1->getType().cast<LLVM::LLVMType>();
auto vType = LLVMType::getVectorTy(
@@ -1664,10 +1664,10 @@ LLVMType LLVMType::getVoidTy(LLVMDialect *dialect) {
// Utility functions.
//===----------------------------------------------------------------------===//
-Value *mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
- StringRef name, StringRef value,
- LLVM::Linkage linkage,
- LLVM::LLVMDialect *llvmDialect) {
+ValuePtr mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
+ StringRef name, StringRef value,
+ LLVM::Linkage linkage,
+ LLVM::LLVMDialect *llvmDialect) {
assert(builder.getInsertionBlock() &&
builder.getInsertionBlock()->getParentOp() &&
"expected builder to point to a block constrained in an op");
@@ -1684,13 +1684,13 @@ Value *mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
builder.getStringAttr(value));
// Get the pointer to the first character in the global string.
- Value *globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- Value *cst0 = builder.create<LLVM::ConstantOp>(
+ ValuePtr globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
+ ValuePtr cst0 = builder.create<LLVM::ConstantOp>(
loc, LLVM::LLVMType::getInt64Ty(llvmDialect),
builder.getIntegerAttr(builder.getIndexType(), 0));
return builder.create<LLVM::GEPOp>(
loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr,
- ArrayRef<Value *>({cst0, cst0}));
+ ArrayRef<ValuePtr>({cst0, cst0}));
}
bool mlir::LLVM::satisfiesLLVMModule(Operation *op) {
diff --git a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
index d7e4d08527d..ee122e16037 100644
--- a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
+++ b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
@@ -49,7 +49,7 @@ static StringRef toStringRef(LinalgDependenceGraph::DependenceType dt) {
llvm_unreachable("Unexpected DependenceType");
}
-Value *Aliases::find(Value *v) {
+ValuePtr Aliases::find(ValuePtr v) {
if (isa<BlockArgument>(v))
return v;
@@ -147,9 +147,9 @@ LinalgDependenceGraph::getDependencesInto(
}
void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) {
- for (auto *srcView : src.getOutputs()) { // W
+ for (auto srcView : src.getOutputs()) { // W
// RAW graph
- for (auto *dstView : dst.getInputs()) { // R
+ for (auto dstView : dst.getInputs()) { // R
if (aliases.alias(srcView, dstView)) { // if alias, fill RAW
addDependenceElem(DependenceType::RAW,
LinalgOpView{src.getOperation(), srcView},
@@ -157,7 +157,7 @@ void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) {
}
}
// WAW graph
- for (auto *dstView : dst.getOutputs()) { // W
+ for (auto dstView : dst.getOutputs()) { // W
if (aliases.alias(srcView, dstView)) { // if alias, fill WAW
addDependenceElem(DependenceType::WAW,
LinalgOpView{src.getOperation(), srcView},
@@ -165,9 +165,9 @@ void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) {
}
}
}
- for (auto *srcView : src.getInputs()) { // R
+ for (auto srcView : src.getInputs()) { // R
// RAR graph
- for (auto *dstView : dst.getInputs()) { // R
+ for (auto dstView : dst.getInputs()) { // R
if (aliases.alias(srcView, dstView)) { // if alias, fill RAR
addDependenceElem(DependenceType::RAR,
LinalgOpView{src.getOperation(), srcView},
@@ -175,7 +175,7 @@ void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) {
}
}
// WAR graph
- for (auto *dstView : dst.getOutputs()) { // W
+ for (auto dstView : dst.getOutputs()) { // W
if (aliases.alias(srcView, dstView)) { // if alias, fill WAR
addDependenceElem(DependenceType::WAR,
LinalgOpView{src.getOperation(), srcView},
@@ -194,14 +194,14 @@ LinalgDependenceGraph::findCoveringDependences(LinalgOp srcLinalgOp,
}
SmallVector<Operation *, 8> LinalgDependenceGraph::findCoveringWrites(
- LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value *view) const {
+ LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view) const {
return findOperationsWithCoveringDependences(
srcLinalgOp, dstLinalgOp, view,
{DependenceType::WAW, DependenceType::WAR});
}
SmallVector<Operation *, 8> LinalgDependenceGraph::findCoveringReads(
- LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value *view) const {
+ LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view) const {
return findOperationsWithCoveringDependences(
srcLinalgOp, dstLinalgOp, view,
{DependenceType::RAR, DependenceType::RAW});
@@ -209,7 +209,7 @@ SmallVector<Operation *, 8> LinalgDependenceGraph::findCoveringReads(
SmallVector<Operation *, 8>
LinalgDependenceGraph::findOperationsWithCoveringDependences(
- LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value *view,
+ LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view,
ArrayRef<DependenceType> types) const {
auto *src = srcLinalgOp.getOperation();
auto *dst = dstLinalgOp.getOperation();
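
The addDependencesBetween hunks above pair every source view with every destination view and record a dependence whenever the two alias; the dependence kind follows only from whether each side is a write (an output) or a read (an input). A self-contained sketch of that classification, with illustrative names:

#include <cassert>

enum class DependenceType { RAW, WAW, RAR, WAR };

// src outputs x dst inputs  -> RAW, src outputs x dst outputs -> WAW,
// src inputs  x dst inputs  -> RAR, src inputs  x dst outputs -> WAR,
// matching the four nested loops in addDependencesBetween.
static DependenceType classify(bool srcIsWrite, bool dstIsWrite) {
  if (srcIsWrite)
    return dstIsWrite ? DependenceType::WAW : DependenceType::RAW;
  return dstIsWrite ? DependenceType::WAR : DependenceType::RAR;
}

int main() {
  assert(classify(/*srcIsWrite=*/true, /*dstIsWrite=*/false) ==
         DependenceType::RAW);
  assert(classify(/*srcIsWrite=*/false, /*dstIsWrite=*/true) ==
         DependenceType::WAR);
  return 0;
}
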
diff --git a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
index ba96186da38..7b530d7f0df 100644
--- a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
+++ b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
@@ -44,8 +44,8 @@ static void getMaxDimIndex(ArrayRef<StructuredIndexed> structuredIndices,
Operation *mlir::edsc::makeLinalgGenericOp(
ArrayRef<IterType> iteratorTypes, ArrayRef<StructuredIndexed> inputs,
ArrayRef<StructuredIndexed> outputs,
- function_ref<void(ArrayRef<BlockArgument *>)> regionBuilder,
- ArrayRef<Value *> otherValues, ArrayRef<Attribute> otherAttributes) {
+ function_ref<void(ArrayRef<BlockArgumentPtr>)> regionBuilder,
+ ArrayRef<ValuePtr> otherValues, ArrayRef<Attribute> otherAttributes) {
auto &builder = edsc::ScopedContext::getBuilder();
auto *ctx = builder.getContext();
unsigned nInputs = inputs.size();
@@ -66,7 +66,7 @@ Operation *mlir::edsc::makeLinalgGenericOp(
AffineMap::get(/*dimCount=*/nDims, /*symbolCount=*/0, out.getExprs()));
unsigned nViews = nInputs + nOutputs;
- SmallVector<Value *, 4> values;
+ SmallVector<ValuePtr, 4> values;
values.reserve(nViews);
values.append(inputs.begin(), inputs.end());
values.append(outputs.begin(), outputs.end());
@@ -109,7 +109,7 @@ Operation *mlir::edsc::makeLinalgGenericOp(
return op;
}
-void mlir::edsc::ops::macRegionBuilder(ArrayRef<BlockArgument *> args) {
+void mlir::edsc::ops::macRegionBuilder(ArrayRef<BlockArgumentPtr> args) {
using edsc::op::operator+;
using edsc::op::operator*;
assert(args.size() == 3 && "expected 3 block arguments");
@@ -122,7 +122,7 @@ Operation *mlir::edsc::ops::linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
StructuredIndexed O) {
SmallVector<edsc::IterType, 4> iterTypes(O.getExprs().size(),
edsc::IterType::Parallel);
- auto fun = [&unaryOp](ArrayRef<BlockArgument *> args) {
+ auto fun = [&unaryOp](ArrayRef<BlockArgumentPtr> args) {
assert(args.size() == 2 && "expected 2 block arguments");
ValueHandle a(args[0]);
linalg_yield(unaryOp(a));
@@ -135,7 +135,7 @@ Operation *mlir::edsc::ops::linalg_pointwise_tanh(StructuredIndexed I,
;
using edsc::intrinsics::tanh;
UnaryPointwiseOpBuilder unOp(
- [](ValueHandle a) -> Value * { return tanh(a); });
+ [](ValueHandle a) -> ValuePtr { return tanh(a); });
return linalg_pointwise(unOp, I, O);
}
@@ -146,7 +146,7 @@ Operation *mlir::edsc::ops::linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
StructuredIndexed O) {
SmallVector<edsc::IterType, 4> iterTypes(O.getExprs().size(),
edsc::IterType::Parallel);
- auto fun = [&binaryOp](ArrayRef<BlockArgument *> args) {
+ auto fun = [&binaryOp](ArrayRef<BlockArgumentPtr> args) {
assert(args.size() == 3 && "expected 3 block arguments");
ValueHandle a(args[0]), b(args[1]);
linalg_yield(binaryOp(a, b));
@@ -159,14 +159,14 @@ Operation *mlir::edsc::ops::linalg_pointwise_add(StructuredIndexed I1,
StructuredIndexed O) {
using edsc::op::operator+;
BinaryPointwiseOpBuilder binOp(
- [](ValueHandle a, ValueHandle b) -> Value * { return a + b; });
+ [](ValueHandle a, ValueHandle b) -> ValuePtr { return a + b; });
return linalg_pointwise(binOp, I1, I2, O);
}
Operation *mlir::edsc::ops::linalg_pointwise_max(StructuredIndexed I1,
StructuredIndexed I2,
StructuredIndexed O) {
- BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> Value * {
+ BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> ValuePtr {
using edsc::intrinsics::select;
using edsc::op::operator>;
return select(a > b, a, b).getValue();
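
The max builder above has no dedicated max operation to call, so it composes one from a comparison and a select. A scalar analogue of that select-based max (illustrative only, not from the patch):

#include <cassert>

// select(a > b, a, b) picks the larger operand; this is the same trick
// linalg_pointwise_max uses at the ValueHandle level.
static float selectMax(float a, float b) { return a > b ? a : b; }

int main() {
  assert(selectMax(1.0f, 2.0f) == 2.0f);
  assert(selectMax(3.0f, -1.0f) == 3.0f);
  return 0;
}
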
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 6eca181e9b4..c5f30b7e10b 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -318,7 +318,7 @@ static ParseResult parseRangeOp(OpAsmParser &parser, OperationState &result) {
// SliceOp
//===----------------------------------------------------------------------===//
void mlir::linalg::SliceOp::build(Builder *b, OperationState &result,
- Value *base, ValueRange indexings) {
+ ValuePtr base, ValueRange indexings) {
result.addOperands(base);
result.addOperands(indexings);
@@ -394,7 +394,7 @@ static LogicalResult verify(SliceOp op) {
// TransposeOp
//===----------------------------------------------------------------------===//
void mlir::linalg::TransposeOp::build(Builder *b, OperationState &result,
- Value *view, AffineMapAttr permutation,
+ ValuePtr view, AffineMapAttr permutation,
ArrayRef<NamedAttribute> attrs) {
auto permutationMap = permutation.getValue();
assert(permutationMap);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 453daba204c..49cea7e4170 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -77,16 +77,16 @@ static llvm::cl::list<unsigned> clTileSizes(
static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
ArrayRef<SubViewOp::Range> loopRanges) {
auto maps = loopToOperandRangesMaps(op);
- SmallVector<Value *, 8> clonedViews;
+ SmallVector<ValuePtr, 8> clonedViews;
clonedViews.reserve(op.getNumInputsAndOutputs());
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<Value *, 8> ios(op.getInputsAndOutputs());
+ SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "map: " << map << "\n");
- Value *view = en.value();
+ ValuePtr view = en.value();
SmallVector<SubViewOp::Range, 4> viewRanges(map.getNumResults());
for (auto en2 : llvm::enumerate(map.getResults())) {
unsigned d = en2.index();
@@ -99,7 +99,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
// Construct a new subview for the tile.
unsigned rank = viewRanges.size();
- SmallVector<Value *, 4> offsets, sizes, strides;
+ SmallVector<ValuePtr, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -117,7 +117,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
struct ViewDimension {
- Value *view;
+ ValuePtr view;
unsigned dimension;
};
@@ -130,14 +130,14 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
auto maps = loopToOperandRangesMaps(op);
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<Value *, 8> ios(op.getInputsAndOutputs());
+ SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange I/O idx: " << idx << "\n");
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange map: " << map << "\n");
- Value *view = en.value();
- SmallVector<Value *, 8> viewRanges(map.getNumResults(), nullptr);
+ ValuePtr view = en.value();
+ SmallVector<ValuePtr, 8> viewRanges(map.getNumResults(), nullptr);
for (auto en2 : llvm::enumerate(map.getResults())) {
if (loopDepth == en2.value().cast<AffineDimExpr>().getPosition()) {
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth
@@ -151,9 +151,9 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
llvm_unreachable("Expect to be able to extract a view defining loop range");
}
-static LinalgOp fuse(Value *producedView, LinalgOp producer, LinalgOp consumer,
- unsigned consumerIdx, unsigned producerIdx,
- OperationFolder *folder) {
+static LinalgOp fuse(ValuePtr producedView, LinalgOp producer,
+ LinalgOp consumer, unsigned consumerIdx,
+ unsigned producerIdx, OperationFolder *folder) {
auto subView = dyn_cast_or_null<SubViewOp>(
consumer.getInput(consumerIdx)->getDefiningOp());
auto slice = dyn_cast_or_null<SliceOp>(
@@ -206,7 +206,7 @@ static LinalgOp fuse(Value *producedView, LinalgOp producer, LinalgOp consumer,
// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
static bool isStructurallyFusableProducer(LinalgOp producer,
- Value *consumedView,
+ ValuePtr consumedView,
LinalgOp consumer) {
if (producer.getNumOutputs() != 1) {
LLVM_DEBUG(dbgs() << "\nNot structurally fusable (multi-output)");
@@ -226,7 +226,7 @@ static bool isStructurallyFusableProducer(LinalgOp producer,
bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
LinalgOp consumer,
- Value *consumedView,
+ ValuePtr consumedView,
LinalgOp producer) {
// Make some simple structural checks that alleviate the need for more
// complex analyses.
@@ -245,7 +245,7 @@ bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
}
bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
- LinalgOp consumer, Value *consumedView,
+ LinalgOp consumer, ValuePtr consumedView,
LinalgOp producer) {
if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
return false;
@@ -272,13 +272,13 @@ Optional<FusionInfo> mlir::linalg::fuseProducerOf(
auto producer = cast<LinalgOp>(dependence.dependentOpView.op);
// Check that the dependence is indeed on the input `consumerIdx` view.
- auto *consumedView = dependence.indexingView;
+ auto consumedView = dependence.indexingView;
if (consumer.getInput(consumerIdx) != consumedView)
continue;
// Consumer consumes this view, `isStructurallyFusableProducer` also checks
// whether it is a strict subview of the producer view.
- auto *producedView = dependence.dependentOpView.view;
+ auto producedView = dependence.dependentOpView.view;
auto producerIdx = producer.getIndexOfOutput(producedView).getValue();
// `consumerIdx` and `producerIdx` exist by construction.
LLVM_DEBUG(dbgs() << "\nRAW producer: " << *producer.getOperation()
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
index c50c495750f..e468c19a0b4 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
@@ -49,7 +49,7 @@ using edsc::op::operator==;
static SmallVector<ValueHandle, 8>
makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> vals) {
+ ArrayRef<ValuePtr> vals) {
assert(map.getNumSymbols() == 0);
assert(map.getNumInputs() == vals.size());
SmallVector<ValueHandle, 8> res;
@@ -57,35 +57,35 @@ makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
auto dims = map.getNumDims();
for (auto e : map.getResults()) {
auto exprMap = AffineMap::get(dims, 0, e);
- SmallVector<Value *, 4> operands(vals.begin(), vals.end());
+ SmallVector<ValuePtr, 4> operands(vals.begin(), vals.end());
canonicalizeMapAndOperands(&exprMap, &operands);
res.push_back(affine_apply(exprMap, operands));
}
return res;
}
-static SmallVector<Value *, 4> permuteIvs(ArrayRef<Value *> ivs,
- Optional<AffineMap> permutation) {
+static SmallVector<ValuePtr, 4> permuteIvs(ArrayRef<ValuePtr> ivs,
+ Optional<AffineMap> permutation) {
return permutation ? applyMapToValues(ScopedContext::getBuilder(),
ScopedContext::getLocation(),
permutation.getValue(), ivs)
- : SmallVector<Value *, 4>(ivs.begin(), ivs.end());
+ : SmallVector<ValuePtr, 4>(ivs.begin(), ivs.end());
}
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
-static SmallVector<Value *, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> allViewSizes);
-SmallVector<Value *, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> allViewSizes) {
+static SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> allViewSizes);
+SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> allViewSizes) {
// Apply `map` to get view sizes in loop order.
auto sizes = applyMapToValues(b, loc, map, allViewSizes);
// Create a new range with the applied tile sizes.
ScopedContext scope(b, loc);
- SmallVector<Value *, 4> res;
+ SmallVector<ValuePtr, 4> res;
for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
res.push_back(range(constant_index(0), sizes[idx], constant_index(1)));
}
@@ -98,7 +98,7 @@ class LinalgScopedEmitter {};
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, CopyOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
CopyOp copyOp) {
auto nPar = copyOp.getNumParallelLoops();
assert(nPar == allIvs.size());
@@ -121,7 +121,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, FillOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
FillOp fillOp) {
auto nPar = fillOp.getNumParallelLoops();
assert(nPar == allIvs.size());
@@ -138,7 +138,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, DotOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs, DotOp dotOp) {
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs, DotOp dotOp) {
assert(allIvs.size() == 1);
IndexHandle r_i(allIvs[0]);
IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
@@ -151,7 +151,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
MatvecOp matvecOp) {
assert(allIvs.size() == 2);
IndexHandle i(allIvs[0]), r_j(allIvs[1]);
@@ -165,7 +165,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
MatmulOp matmulOp) {
assert(allIvs.size() == 3);
IndexHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
@@ -179,7 +179,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, ConvOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
ConvOp convOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
@@ -229,14 +229,14 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, GenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
GenericOp genericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
using edsc::intrinsics::detail::ValueHandleArray;
unsigned nInputs = genericOp.getNumInputs();
unsigned nOutputs = genericOp.getNumOutputs();
- SmallVector<Value *, 4> indexedValues(nInputs + nOutputs);
+ SmallVector<ValuePtr, 4> indexedValues(nInputs + nOutputs);
// 1.a. Emit std_load from input views.
for (unsigned i = 0; i < nInputs; ++i) {
@@ -324,7 +324,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
IndexedGenericOp indexedGenericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
@@ -332,7 +332,7 @@ public:
unsigned nInputs = indexedGenericOp.getNumInputs();
unsigned nOutputs = indexedGenericOp.getNumOutputs();
unsigned nLoops = allIvs.size();
- SmallVector<Value *, 4> indexedValues(nLoops + nInputs + nOutputs);
+ SmallVector<ValuePtr, 4> indexedValues(nLoops + nInputs + nOutputs);
for (unsigned i = 0; i < nLoops; ++i) {
indexedValues[i] = allIvs[i];
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
index f4364928af8..999406e05cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
@@ -99,7 +99,7 @@ LogicalResult mlir::linalg::tileAndFuseLinalgOpAndSetMarker(
}
bool mlir::linalg::detail::isProducedByOpOfTypeImpl(
- Operation *consumerOp, Value *consumedView,
+ Operation *consumerOp, ValuePtr consumedView,
function_ref<bool(Operation *)> isaOpType) {
LinalgOp consumer = dyn_cast<LinalgOp>(consumerOp);
if (!consumer)
@@ -175,7 +175,7 @@ LogicalResult mlir::linalg::vectorizeGenericOp(PatternRewriter &rewriter,
return failure();
// TODO(ntv): non-identity layout.
- auto isStaticMemRefWithIdentityLayout = [](Value *v) {
+ auto isStaticMemRefWithIdentityLayout = [](ValuePtr v) {
auto m = v->getType().dyn_cast<MemRefType>();
if (!m || !m.hasStaticShape() || !m.getAffineMaps().empty())
return false;
@@ -235,7 +235,7 @@ mlir::linalg::permuteGenericLinalgOp(PatternRewriter &rewriter, Operation *op,
LogicalResult mlir::linalg::linalgOpPromoteSubviews(PatternRewriter &rewriter,
Operation *op) {
LinalgOp linOp = dyn_cast<LinalgOp>(op);
- SetVector<Value *> subViews;
+ SetVector<ValuePtr> subViews;
for (auto it : linOp.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
subViews.insert(sv);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index c7fbebce383..b1dae455194 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -55,14 +55,15 @@ static llvm::cl::opt<bool> clPromoteDynamic(
llvm::cl::desc("Test generation of dynamic promoted buffers"),
llvm::cl::cat(clOptionsCategory), llvm::cl::init(false));
-static Value *allocBuffer(Type elementType, Value *size, bool dynamicBuffers) {
+static ValuePtr allocBuffer(Type elementType, ValuePtr size,
+ bool dynamicBuffers) {
auto *ctx = size->getContext();
auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
if (!dynamicBuffers)
if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size->getDefiningOp()))
return alloc(
MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
- Value *mul = muli(constant_index(width), size);
+ ValuePtr mul = muli(constant_index(width), size);
return alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
}
@@ -92,20 +93,20 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
auto viewType = subView.getType();
auto rank = viewType.getRank();
- Value *allocSize = one;
- SmallVector<Value *, 8> fullRanges, partialRanges;
+ ValuePtr allocSize = one;
+ SmallVector<ValuePtr, 8> fullRanges, partialRanges;
fullRanges.reserve(rank);
partialRanges.reserve(rank);
for (auto en : llvm::enumerate(subView.getRanges())) {
auto rank = en.index();
auto rangeValue = en.value();
- Value *d = rangeValue.size;
+ ValuePtr d = rangeValue.size;
allocSize = muli(folder, allocSize, d).getValue();
fullRanges.push_back(d);
partialRanges.push_back(range(folder, zero, dim(subView, rank), one));
}
SmallVector<int64_t, 4> dynSizes(fullRanges.size(), -1);
- auto *buffer =
+ auto buffer =
allocBuffer(viewType.getElementType(), allocSize, dynamicBuffers);
auto fullLocalView = view(
MemRefType::get(dynSizes, viewType.getElementType()), buffer, fullRanges);
@@ -115,7 +116,7 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
SmallVector<PromotionInfo, 8>
mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
- ArrayRef<Value *> subViews, bool dynamicBuffers,
+ ArrayRef<ValuePtr> subViews, bool dynamicBuffers,
OperationFolder *folder) {
if (subViews.empty())
return {};
@@ -123,8 +124,8 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
ScopedContext scope(b, loc);
SmallVector<PromotionInfo, 8> res;
res.reserve(subViews.size());
- DenseMap<Value *, PromotionInfo> promotionInfoMap;
- for (auto *v : subViews) {
+ DenseMap<ValuePtr, PromotionInfo> promotionInfoMap;
+ for (auto v : subViews) {
SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
auto viewType = subView.getType();
// TODO(ntv): support more cases than just float.
@@ -136,7 +137,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
res.push_back(promotionInfo);
}
- for (auto *v : subViews) {
+ for (auto v : subViews) {
SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
auto info = promotionInfoMap.find(v);
if (info == promotionInfoMap.end())
@@ -144,14 +145,14 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
// TODO(ntv): value to fill with should be related to the operation.
// For now, just use APFloat(0.0f).
auto t = subView.getType().getElementType().cast<FloatType>();
- Value *fillVal = constant_float(folder, APFloat(0.0f), t);
+ ValuePtr fillVal = constant_float(folder, APFloat(0.0f), t);
// TODO(ntv): fill is only necessary if `promotionInfo` has a full local
// view that is different from the partial local view and we are on the
// boundary.
fill(info->second.fullLocalView, fillVal);
}
- for (auto *v : subViews) {
+ for (auto v : subViews) {
auto info = promotionInfoMap.find(v);
if (info == promotionInfoMap.end())
continue;
@@ -161,19 +162,19 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
}
LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op,
- SetVector<Value *> subViews,
+ SetVector<ValuePtr> subViews,
bool dynamicBuffers,
OperationFolder *folder) {
// 1. Promote the specified views and use them in the new op.
ScopedContext scope(b, op.getLoc());
auto promotedBufferAndViews = promoteSubViews(
b, op.getLoc(), subViews.getArrayRef(), dynamicBuffers, folder);
- SmallVector<Value *, 8> opViews;
+ SmallVector<ValuePtr, 8> opViews;
opViews.reserve(op.getNumInputsAndOutputs());
- SmallVector<std::pair<Value *, Value *>, 8> writebackViews;
+ SmallVector<std::pair<ValuePtr, ValuePtr>, 8> writebackViews;
writebackViews.reserve(subViews.size());
unsigned promotedIdx = 0;
- for (auto *view : op.getInputsAndOutputs()) {
+ for (auto view : op.getInputsAndOutputs()) {
if (subViews.count(view) != 0) {
opViews.push_back(promotedBufferAndViews[promotedIdx].fullLocalView);
writebackViews.emplace_back(std::make_pair(
@@ -214,7 +215,7 @@ static void promoteSubViews(FuncOp f, bool dynamicBuffers) {
f.walk([dynamicBuffers, &folder, &toErase](LinalgOp op) {
// TODO(ntv) some heuristic here to decide what to promote. Atm it is all or
// nothing.
- SetVector<Value *> subViews;
+ SetVector<ValuePtr> subViews;
OpBuilder b(op);
for (auto it : op.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
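
The Promotion.cpp hunks also change range-for loops from `for (auto *v : subViews)` to `for (auto v : subViews)`. With a raw-pointer alias both spellings deduce the same type; dropping the `*` is what keeps the loops valid if the alias later stops being a pointer. A self-contained sketch (the `ValuePtr` alias is assumed, as above):

#include <vector>

struct Value {};
using ValuePtr = Value *; // assumed transitional alias

void visitAll(const std::vector<ValuePtr> &views) {
  for (auto v : views) {   // deduces ValuePtr today, and keeps compiling if
    (void)v;               // ValuePtr is later redefined as a value-like type
  }
  // for (auto *v : views) // only legal while ValuePtr is a raw pointer,
  //                       // which is why this diff drops the '*'.
}
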
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 4d8a24cb6cb..07d559918cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -53,7 +53,7 @@ static llvm::cl::list<unsigned>
llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
llvm::cl::cat(clOptionsCategory));
-static bool isZero(Value *v) {
+static bool isZero(ValuePtr v) {
return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
}
@@ -71,12 +71,12 @@ using LoopIndexToRangeIndexMap = DenseMap<int, int>;
// indices of newly created loops.
static std::tuple<SmallVector<SubViewOp::Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> allViewSizes,
- ArrayRef<Value *> allTileSizes, OperationFolder *folder) {
+ ArrayRef<ValuePtr> allViewSizes,
+ ArrayRef<ValuePtr> allTileSizes, OperationFolder *folder) {
assert(allTileSizes.size() == map.getNumResults());
// Apply `map` to get view sizes in loop order.
auto viewSizes = applyMapToValues(b, loc, map, allViewSizes, folder);
- SmallVector<Value *, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
+ SmallVector<ValuePtr, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
// Traverse the tile sizes, which are in loop order, erase zeros everywhere.
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
@@ -110,7 +110,7 @@ namespace {
// `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0]
//
struct TileCheck : public AffineExprVisitor<TileCheck> {
- TileCheck(ArrayRef<Value *> tileSizes)
+ TileCheck(ArrayRef<ValuePtr> tileSizes)
: isTiled(false), tileSizes(tileSizes) {}
void visitDimExpr(AffineDimExpr expr) {
@@ -124,7 +124,7 @@ struct TileCheck : public AffineExprVisitor<TileCheck> {
"nonpositive multiplying coefficient");
}
bool isTiled;
- ArrayRef<Value *> tileSizes;
+ ArrayRef<ValuePtr> tileSizes;
};
} // namespace
@@ -206,11 +206,11 @@ void transformIndexedGenericOpIndices(
auto rangeIndex = loopIndexToRangeIndex.find(i);
if (rangeIndex == loopIndexToRangeIndex.end())
continue;
- Value *oldIndex = block.getArgument(i);
+ ValuePtr oldIndex = block.getArgument(i);
// Offset the index argument `i` by the value of the corresponding induction
// variable and replace all uses of the previous value.
- Value *newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
- pivs[rangeIndex->second]->getValue());
+ ValuePtr newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
+ pivs[rangeIndex->second]->getValue());
for (auto &use : oldIndex->getUses()) {
if (use.getOwner() == newIndex->getDefiningOp())
continue;
@@ -219,7 +219,7 @@ void transformIndexedGenericOpIndices(
}
}
-static bool isTiled(AffineExpr expr, ArrayRef<Value *> tileSizes) {
+static bool isTiled(AffineExpr expr, ArrayRef<ValuePtr> tileSizes) {
if (!expr)
return false;
TileCheck t(tileSizes);
@@ -229,7 +229,7 @@ static bool isTiled(AffineExpr expr, ArrayRef<Value *> tileSizes) {
// Checks whether the view with index `viewIndex` within `linalgOp` varies with
// respect to a non-zero `tileSize`.
-static bool isTiled(AffineMap map, ArrayRef<Value *> tileSizes) {
+static bool isTiled(AffineMap map, ArrayRef<ValuePtr> tileSizes) {
if (!map)
return false;
for (unsigned r = 0; r < map.getNumResults(); ++r)
@@ -238,13 +238,13 @@ static bool isTiled(AffineMap map, ArrayRef<Value *> tileSizes) {
return false;
}
-static SmallVector<Value *, 4>
+static SmallVector<ValuePtr, 4>
makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
- ArrayRef<Value *> ivs, ArrayRef<Value *> tileSizes,
- ArrayRef<Value *> viewSizes, OperationFolder *folder) {
+ ArrayRef<ValuePtr> ivs, ArrayRef<ValuePtr> tileSizes,
+ ArrayRef<ValuePtr> viewSizes, OperationFolder *folder) {
assert(ivs.size() == static_cast<size_t>(llvm::count_if(
llvm::make_range(tileSizes.begin(), tileSizes.end()),
- [](Value *v) { return !isZero(v); })) &&
+ [](ValuePtr v) { return !isZero(v); })) &&
"expected as many ivs as non-zero sizes");
using edsc::intrinsics::select;
@@ -253,21 +253,22 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// Construct (potentially temporary) mins and maxes on which to apply maps
// that define tile subviews.
- SmallVector<Value *, 8> lbs, subViewSizes;
+ SmallVector<ValuePtr, 8> lbs, subViewSizes;
for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
bool isTiled = !isZero(tileSizes[idx]);
- lbs.push_back(isTiled ? ivs[idxIvs++] : (Value *)constant_index(folder, 0));
+ lbs.push_back(isTiled ? ivs[idxIvs++]
+ : (ValuePtr)constant_index(folder, 0));
subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]);
}
auto *op = linalgOp.getOperation();
- SmallVector<Value *, 4> res;
+ SmallVector<ValuePtr, 4> res;
res.reserve(op->getNumOperands());
auto viewIteratorBegin = linalgOp.getInputsAndOutputs().begin();
for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs();
++viewIndex) {
- Value *view = *(viewIteratorBegin + viewIndex);
+ ValuePtr view = *(viewIteratorBegin + viewIndex);
unsigned rank = view->getType().cast<MemRefType>().getRank();
auto map = loopToOperandRangesMaps(linalgOp)[viewIndex];
// If the view is not tiled, we can use it as is.
@@ -277,7 +278,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
}
// Construct a new subview for the tile.
- SmallVector<Value *, 4> offsets, sizes, strides;
+ SmallVector<ValuePtr, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -292,9 +293,9 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// Tiling creates a new slice at the proper index, the slice step is 1
// (i.e. the slice view does not subsample, stepping occurs in the loop).
auto m = map.getSubMap({r});
- auto *offset = applyMapToValues(b, loc, m, lbs, folder).front();
+ auto offset = applyMapToValues(b, loc, m, lbs, folder).front();
offsets.push_back(offset);
- auto *size = applyMapToValues(b, loc, m, subViewSizes, folder).front();
+ auto size = applyMapToValues(b, loc, m, subViewSizes, folder).front();
sizes.push_back(size);
strides.push_back(constant_index(folder, 1));
}
@@ -308,7 +309,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// This is a special type of folding that we only apply when `folder` is
// defined.
if (folder)
- for (auto *v : llvm::concat<Value *>(lbs, subViewSizes))
+ for (auto v : llvm::concat<ValuePtr>(lbs, subViewSizes))
if (v->use_empty())
v->getDefiningOp()->erase();
@@ -316,7 +317,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
}
Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
- OpBuilder &b, LinalgOp op, ArrayRef<Value *> tileSizes,
+ OpBuilder &b, LinalgOp op, ArrayRef<ValuePtr> tileSizes,
ArrayRef<unsigned> permutation, OperationFolder *folder) {
// 1. Enforce the convention that "tiling by zero" skips tiling a particular
// dimension. This convention is significantly simpler to handle instead of
@@ -360,7 +361,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
LoopNestRangeBuilder(pivs, loopRanges)([&] {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
- SmallVector<Value *, 4> ivValues(ivs.begin(), ivs.end());
+ SmallVector<ValuePtr, 4> ivValues(ivs.begin(), ivs.end());
// If we have to apply a permutation to the tiled loop nest, we have to
    // reorder the induction variables. This permutation is the right one
@@ -411,7 +412,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
ScopedContext scope(b, op.getLoc());
// Materialize concrete tile size values to pass the generic tiling function.
- SmallVector<Value *, 8> tileSizeValues;
+ SmallVector<ValuePtr, 8> tileSizeValues;
tileSizeValues.reserve(tileSizes.size());
for (auto ts : tileSizes)
tileSizeValues.push_back(constant_index(folder, ts));
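
One spot in makeTiledViews keeps an explicit cast, now written `(ValuePtr)constant_index(folder, 0)`: one arm of the conditional is already a `ValuePtr`, while `constant_index` returns an EDSC handle, so the other arm is forced to the common type. A toy illustration of why such a cast can be required (the two-way convertibility of the handle is an assumption made for this sketch):

struct Value {};
using ValuePtr = Value *;

// Stand-in for an EDSC-style handle that converts to and from ValuePtr.
struct Handle {
  Handle() = default;
  Handle(ValuePtr v) : v(v) {}
  operator ValuePtr() const { return v; }
  ValuePtr v = nullptr;
};

ValuePtr pickLowerBound(bool tiled, ValuePtr iv, Handle zero) {
  // With conversions available in both directions, `tiled ? iv : zero` would
  // be ambiguous; casting one arm pins the conditional's type to ValuePtr,
  // mirroring the (ValuePtr)constant_index(folder, 0) cast in the hunk above.
  return tiled ? iv : (ValuePtr)zero;
}
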
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index eb501f9b5b5..125937807f4 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -92,7 +92,7 @@ mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
}
mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
- ArrayRef<ValueHandle *> ivs, ArrayRef<Value *> ranges)
+ ArrayRef<ValueHandle *> ivs, ArrayRef<ValuePtr> ranges)
: LoopNestRangeBuilder(
ivs, SmallVector<ValueHandle, 4>(ranges.begin(), ranges.end())) {}
@@ -106,26 +106,26 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
return ValueHandle::null();
}
-static Value *emitOrFoldComposedAffineApply(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> operandsRef,
- OperationFolder *folder) {
- SmallVector<Value *, 4> operands(operandsRef.begin(), operandsRef.end());
+static ValuePtr emitOrFoldComposedAffineApply(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> operandsRef,
+ OperationFolder *folder) {
+ SmallVector<ValuePtr, 4> operands(operandsRef.begin(), operandsRef.end());
fullyComposeAffineMapAndOperands(&map, &operands);
canonicalizeMapAndOperands(&map, &operands);
return folder ? folder->create<AffineApplyOp>(b, loc, map, operands)
: b.create<AffineApplyOp>(loc, map, operands);
}
-SmallVector<Value *, 4>
+SmallVector<ValuePtr, 4>
mlir::linalg::applyMapToValues(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> values,
+ ArrayRef<ValuePtr> values,
OperationFolder *folder) {
- SmallVector<Value *, 4> res;
+ SmallVector<ValuePtr, 4> res;
res.reserve(map.getNumResults());
unsigned numDims = map.getNumDims();
// For each `expr` in `map`, applies the `expr` to the values extracted from
- // ranges. If the resulting application can be folded into a Value*, the
+ // ranges. If the resulting application can be folded into a Value, the
// folding occurs eagerly. Otherwise, an affine.apply operation is emitted.
for (auto expr : map.getResults()) {
AffineMap map = AffineMap::get(numDims, 0, expr);
@@ -137,12 +137,12 @@ mlir::linalg::applyMapToValues(OpBuilder &b, Location loc, AffineMap map,
/// Returns all the operands of `linalgOp` that are not views.
/// Asserts that these operands are value types to allow transformations like
/// tiling to just use the values when cloning `linalgOp`.
-SmallVector<Value *, 4>
+SmallVector<ValuePtr, 4>
mlir::linalg::getAssumedNonViewOperands(LinalgOp linalgOp) {
auto *op = linalgOp.getOperation();
unsigned numViews = linalgOp.getNumInputsAndOutputs();
unsigned nOperands = op->getNumOperands() - numViews;
- SmallVector<Value *, 4> res;
+ SmallVector<ValuePtr, 4> res;
res.reserve(nOperands);
for (unsigned i = 0; i < nOperands; ++i) {
res.push_back(op->getOperand(numViews + i));
diff --git a/mlir/lib/Dialect/LoopOps/LoopOps.cpp b/mlir/lib/Dialect/LoopOps/LoopOps.cpp
index fc8832e9a46..9610a1ac270 100644
--- a/mlir/lib/Dialect/LoopOps/LoopOps.cpp
+++ b/mlir/lib/Dialect/LoopOps/LoopOps.cpp
@@ -69,8 +69,8 @@ LoopOpsDialect::LoopOpsDialect(MLIRContext *context)
// ForOp
//===----------------------------------------------------------------------===//
-void ForOp::build(Builder *builder, OperationState &result, Value *lb,
- Value *ub, Value *step) {
+void ForOp::build(Builder *builder, OperationState &result, ValuePtr lb,
+ ValuePtr ub, ValuePtr step) {
result.addOperands({lb, ub, step});
Region *bodyRegion = result.addRegion();
ForOp::ensureTerminator(*bodyRegion, *builder, result.location);
@@ -134,7 +134,7 @@ static ParseResult parseForOp(OpAsmParser &parser, OperationState &result) {
Region &ForOp::getLoopBody() { return region(); }
-bool ForOp::isDefinedOutsideOfLoop(Value *value) {
+bool ForOp::isDefinedOutsideOfLoop(ValuePtr value) {
return !region().isAncestor(value->getParentRegion());
}
@@ -144,8 +144,8 @@ LogicalResult ForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
return success();
}
-ForOp mlir::loop::getForInductionVarOwner(Value *val) {
- auto *ivArg = dyn_cast<BlockArgument>(val);
+ForOp mlir::loop::getForInductionVarOwner(ValuePtr val) {
+ auto ivArg = dyn_cast<BlockArgument>(val);
if (!ivArg)
return ForOp();
assert(ivArg->getOwner() && "unlinked block argument");
@@ -157,7 +157,7 @@ ForOp mlir::loop::getForInductionVarOwner(Value *val) {
// IfOp
//===----------------------------------------------------------------------===//
-void IfOp::build(Builder *builder, OperationState &result, Value *cond,
+void IfOp::build(Builder *builder, OperationState &result, ValuePtr cond,
bool withElseRegion) {
result.addOperands(cond);
Region *thenRegion = result.addRegion();
diff --git a/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp
index def8ee810fe..4416e1e6b04 100644
--- a/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp
@@ -94,7 +94,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only spv.ReturnValue needs to be handled here.
auto retValOp = dyn_cast<spirv::ReturnValueOp>(op);
if (!retValOp)
diff --git a/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp b/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp
index 284fe915029..ca9b883a703 100644
--- a/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp
+++ b/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp
@@ -229,9 +229,9 @@ getOrInsertBuiltinVariable(spirv::ModuleOp &moduleOp, Location loc,
 /// Gets the global variable associated with a builtin and adds
/// it if it doesn't exist.
-Value *mlir::spirv::getBuiltinVariableValue(Operation *op,
- spirv::BuiltIn builtin,
- OpBuilder &builder) {
+ValuePtr mlir::spirv::getBuiltinVariableValue(Operation *op,
+ spirv::BuiltIn builtin,
+ OpBuilder &builder) {
auto moduleOp = op->getParentOfType<spirv::ModuleOp>();
if (!moduleOp) {
op->emitError("expected operation to be within a SPIR-V module");
@@ -239,7 +239,7 @@ Value *mlir::spirv::getBuiltinVariableValue(Operation *op,
}
spirv::GlobalVariableOp varOp =
getOrInsertBuiltinVariable(moduleOp, op->getLoc(), builtin, builder);
- Value *ptr = builder.create<spirv::AddressOfOp>(op->getLoc(), varOp);
+ ValuePtr ptr = builder.create<spirv::AddressOfOp>(op->getLoc(), varOp);
return builder.create<spirv::LoadOp>(op->getLoc(), ptr,
/*memory_access =*/nullptr,
/*alignment =*/nullptr);
diff --git a/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp
index 0df4525bac6..a20c18056e1 100644
--- a/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp
@@ -273,8 +273,8 @@ static LogicalResult verifyMemorySemantics(BarrierOp op) {
}
template <typename LoadStoreOpTy>
-static LogicalResult verifyLoadStorePtrAndValTypes(LoadStoreOpTy op, Value *ptr,
- Value *val) {
+static LogicalResult verifyLoadStorePtrAndValTypes(LoadStoreOpTy op,
+ ValuePtr ptr, ValuePtr val) {
// ODS already checks ptr is spirv::PointerType. Just check that the pointee
// type of the pointer and the type of the value are the same
//
@@ -664,8 +664,8 @@ static ParseResult parseShiftOp(OpAsmParser &parser, OperationState &state) {
}
static void printShiftOp(Operation *op, OpAsmPrinter &printer) {
- Value *base = op->getOperand(0);
- Value *shift = op->getOperand(1);
+ ValuePtr base = op->getOperand(0);
+ ValuePtr shift = op->getOperand(1);
printer << op->getName() << ' ' << *base << ", " << *shift << " : "
<< base->getType() << ", " << shift->getType();
}
@@ -742,7 +742,7 @@ static Type getElementPtrType(Type type, ValueRange indices, Location baseLoc) {
}
void spirv::AccessChainOp::build(Builder *builder, OperationState &state,
- Value *basePtr, ValueRange indices) {
+ ValuePtr basePtr, ValueRange indices) {
auto type = getElementPtrType(basePtr->getType(), indices, state.location);
assert(type && "Unable to deduce return type based on basePtr and indices");
build(builder, state, type, basePtr, indices);
@@ -782,8 +782,8 @@ static void print(spirv::AccessChainOp op, OpAsmPrinter &printer) {
}
static LogicalResult verify(spirv::AccessChainOp accessChainOp) {
- SmallVector<Value *, 4> indices(accessChainOp.indices().begin(),
- accessChainOp.indices().end());
+ SmallVector<ValuePtr, 4> indices(accessChainOp.indices().begin(),
+ accessChainOp.indices().end());
auto resultType = getElementPtrType(accessChainOp.base_ptr()->getType(),
indices, accessChainOp.getLoc());
if (!resultType) {
@@ -824,7 +824,7 @@ struct CombineChainedAccessChain
}
// Combine indices.
- SmallVector<Value *, 4> indices(parentAccessChainOp.indices());
+ SmallVector<ValuePtr, 4> indices(parentAccessChainOp.indices());
indices.append(accessChainOp.indices().begin(),
accessChainOp.indices().end());
@@ -1060,7 +1060,7 @@ static LogicalResult verify(spirv::BitFieldInsertOp bitFieldOp) {
static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &state) {
Block *dest;
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
if (parser.parseSuccessorAndUseList(dest, destOperands))
return failure();
state.addSuccessor(dest, destOperands);
@@ -1089,7 +1089,7 @@ static ParseResult parseBranchConditionalOp(OpAsmParser &parser,
auto &builder = parser.getBuilder();
OpAsmParser::OperandType condInfo;
Block *dest;
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
// Parse the condition.
Type boolTy = builder.getI1Type();
@@ -1214,7 +1214,7 @@ static void print(spirv::CompositeConstructOp compositeConstructOp,
static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) {
auto cType = compositeConstructOp.getType().cast<spirv::CompositeType>();
- SmallVector<Value *, 4> constituents(compositeConstructOp.constituents());
+ SmallVector<ValuePtr, 4> constituents(compositeConstructOp.constituents());
if (constituents.size() != cType.getNumElements()) {
return compositeConstructOp.emitError(
"has incorrect number of operands: expected ")
@@ -1239,7 +1239,7 @@ static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) {
//===----------------------------------------------------------------------===//
void spirv::CompositeExtractOp::build(Builder *builder, OperationState &state,
- Value *composite,
+ ValuePtr composite,
ArrayRef<int32_t> indices) {
auto indexAttr = builder->getI32ArrayAttr(indices);
auto elementType =
@@ -1963,7 +1963,7 @@ OpFoldResult spirv::ISubOp::fold(ArrayRef<Attribute> operands) {
//===----------------------------------------------------------------------===//
void spirv::LoadOp::build(Builder *builder, OperationState &state,
- Value *basePtr, IntegerAttr memory_access,
+ ValuePtr basePtr, IntegerAttr memory_access,
IntegerAttr alignment) {
auto ptrType = basePtr->getType().cast<spirv::PointerType>();
build(builder, state, ptrType.getPointeeType(), basePtr, memory_access,
@@ -2497,7 +2497,8 @@ static LogicalResult verify(spirv::ReturnValueOp retValOp) {
//===----------------------------------------------------------------------===//
void spirv::SelectOp::build(Builder *builder, OperationState &state,
- Value *cond, Value *trueValue, Value *falseValue) {
+ ValuePtr cond, ValuePtr trueValue,
+ ValuePtr falseValue) {
build(builder, state, trueValue->getType(), cond, trueValue, falseValue);
}
@@ -2698,9 +2699,9 @@ struct ConvertSelectionOpToSelect
return matchFailure();
}
- auto *trueValue = getSrcValue(trueBlock);
- auto *falseValue = getSrcValue(falseBlock);
- auto *ptrValue = getDstPtr(trueBlock);
+ auto trueValue = getSrcValue(trueBlock);
+ auto falseValue = getSrcValue(falseBlock);
+ auto ptrValue = getDstPtr(trueBlock);
auto storeOpAttributes =
cast<spirv::StoreOp>(trueBlock->front()).getOperation()->getAttrs();
@@ -2747,13 +2748,13 @@ private:
}
   // Returns a source value for the given block.
- Value *getSrcValue(Block *block) const {
+ ValuePtr getSrcValue(Block *block) const {
auto storeOp = cast<spirv::StoreOp>(block->front());
return storeOp.value();
}
// Returns a destination value for the given block.
- Value *getDstPtr(Block *block) const {
+ ValuePtr getDstPtr(Block *block) const {
auto storeOp = cast<spirv::StoreOp>(block->front());
return storeOp.ptr();
}
diff --git a/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp b/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp
index df9cb47a562..799828cb629 100644
--- a/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp
+++ b/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp
@@ -327,7 +327,7 @@ private:
/// This method materializes normal constants and inserts "casting" ops
   /// (`spv._address_of` and `spv._reference_of`) to turn a symbol into an SSA
/// value for handling uses of module scope constants/variables in functions.
- Value *getValue(uint32_t id);
+ ValuePtr getValue(uint32_t id);
/// Slices the first instruction out of `binary` and returns its opcode and
/// operands via `opcode` and `operands` respectively. Returns failure if
@@ -446,7 +446,7 @@ private:
DenseMap<Block *, BlockPhiInfo> blockPhiInfo;
// Result <id> to value mapping.
- DenseMap<uint32_t, Value *> valueMap;
+ DenseMap<uint32_t, ValuePtr> valueMap;
// Mapping from result <id> to undef value of a type.
DenseMap<uint32_t, Type> undefMap;
@@ -1520,7 +1520,7 @@ Deserializer::processBranchConditional(ArrayRef<uint32_t> operands) {
"false label, and optionally two branch weights");
}
- auto *condition = getValue(operands[0]);
+ auto condition = getValue(operands[0]);
auto *trueBlock = getOrCreateBlock(operands[1]);
auto *falseBlock = getOrCreateBlock(operands[2]);
@@ -1531,8 +1531,8 @@ Deserializer::processBranchConditional(ArrayRef<uint32_t> operands) {
opBuilder.create<spirv::BranchConditionalOp>(
unknownLoc, condition, trueBlock,
- /*trueArguments=*/ArrayRef<Value *>(), falseBlock,
- /*falseArguments=*/ArrayRef<Value *>(), weights);
+ /*trueArguments=*/ArrayRef<ValuePtr>(), falseBlock,
+ /*falseArguments=*/ArrayRef<ValuePtr>(), weights);
return success();
}
@@ -1626,7 +1626,7 @@ LogicalResult Deserializer::processPhi(ArrayRef<uint32_t> operands) {
// Create a block argument for this OpPhi instruction.
Type blockArgType = getType(operands[0]);
- BlockArgument *blockArg = curBlock->addArgument(blockArgType);
+ BlockArgumentPtr blockArg = curBlock->addArgument(blockArgType);
valueMap[operands[1]] = blockArg;
LLVM_DEBUG(llvm::dbgs() << "[phi] created block argument " << blockArg
<< " id = " << operands[1] << " of type "
@@ -1783,8 +1783,8 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
LLVM_DEBUG(llvm::dbgs() << "[cf] cloned block " << newBlock
<< " from block " << block << "\n");
if (!isFnEntryBlock(block)) {
- for (BlockArgument *blockArg : block->getArguments()) {
- auto *newArg = newBlock->addArgument(blockArg->getType());
+ for (BlockArgumentPtr blockArg : block->getArguments()) {
+ auto newArg = newBlock->addArgument(blockArg->getType());
mapper.map(blockArg, newArg);
LLVM_DEBUG(llvm::dbgs() << "[cf] remapped block argument " << blockArg
<< " to " << newArg << '\n');
@@ -1801,10 +1801,10 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
// Go through all ops and remap the operands.
auto remapOperands = [&](Operation *op) {
for (auto &operand : op->getOpOperands())
- if (auto *mappedOp = mapper.lookupOrNull(operand.get()))
+ if (auto mappedOp = mapper.lookupOrNull(operand.get()))
operand.set(mappedOp);
for (auto &succOp : op->getBlockOperands())
- if (auto *mappedOp = mapper.lookupOrNull(succOp.get()))
+ if (auto mappedOp = mapper.lookupOrNull(succOp.get()))
succOp.set(mappedOp);
};
for (auto &block : body) {
@@ -1824,13 +1824,13 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
// we place the selection/loop op inside the old merge block, we need to
// make sure the old merge block has the same block argument list.
assert(mergeBlock->args_empty() && "OpPhi in loop merge block unsupported");
- for (BlockArgument *blockArg : headerBlock->getArguments()) {
+ for (BlockArgumentPtr blockArg : headerBlock->getArguments()) {
mergeBlock->addArgument(blockArg->getType());
}
// If the loop header block has block arguments, make sure the spv.branch op
// matches.
- SmallVector<Value *, 4> blockArgs;
+ SmallVector<ValuePtr, 4> blockArgs;
if (!headerBlock->args_empty())
blockArgs = {mergeBlock->args_begin(), mergeBlock->args_end()};
@@ -1838,7 +1838,7 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
// loop header block.
builder.setInsertionPointToEnd(&body.front());
builder.create<spirv::BranchOp>(location, mapper.lookupOrNull(headerBlock),
- ArrayRef<Value *>(blockArgs));
+ ArrayRef<ValuePtr>(blockArgs));
}
// All the blocks cloned into the SelectionOp/LoopOp's region can now be
@@ -1924,10 +1924,10 @@ LogicalResult Deserializer::wireUpBlockArgument() {
auto *op = block->getTerminator();
opBuilder.setInsertionPoint(op);
- SmallVector<Value *, 4> blockArgs;
+ SmallVector<ValuePtr, 4> blockArgs;
blockArgs.reserve(phiInfo.size());
for (uint32_t valueId : phiInfo) {
- if (Value *value = getValue(valueId)) {
+ if (ValuePtr value = getValue(valueId)) {
blockArgs.push_back(value);
LLVM_DEBUG(llvm::dbgs() << "[phi] block argument " << value
<< " id = " << valueId << '\n');
@@ -1996,7 +1996,7 @@ LogicalResult Deserializer::structurizeControlFlow() {
// Instruction
//===----------------------------------------------------------------------===//
-Value *Deserializer::getValue(uint32_t id) {
+ValuePtr Deserializer::getValue(uint32_t id) {
if (auto constInfo = getConstant(id)) {
// Materialize a `spv.constant` op at every use site.
return opBuilder.create<spirv::ConstantOp>(unknownLoc, constInfo->second,
@@ -2192,7 +2192,7 @@ LogicalResult Deserializer::processBitcast(ArrayRef<uint32_t> words) {
}
}
valueID = words[wordIndex++];
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
SmallVector<NamedAttribute, 4> attributes;
if (wordIndex < words.size()) {
auto arg = getValue(words[wordIndex]);
@@ -2366,9 +2366,9 @@ Deserializer::processOp<spirv::FunctionCallOp>(ArrayRef<uint32_t> operands) {
auto functionName = getFunctionSymbol(functionID);
- SmallVector<Value *, 4> arguments;
+ SmallVector<ValuePtr, 4> arguments;
for (auto operand : llvm::drop_begin(operands, 3)) {
- auto *value = getValue(operand);
+ auto value = getValue(operand);
if (!value) {
return emitError(unknownLoc, "unknown <id> ")
<< operand << " used by OpFunctionCall";
diff --git a/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp
index 4baac53b89f..9b47045ea61 100644
--- a/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp
@@ -323,7 +323,7 @@ private:
uint32_t opcode,
ArrayRef<uint32_t> operands);
- uint32_t getValueID(Value *val) const { return valueIDMap.lookup(val); }
+ uint32_t getValueID(ValuePtr val) const { return valueIDMap.lookup(val); }
LogicalResult processAddressOfOp(spirv::AddressOfOp addressOfOp);
@@ -414,7 +414,7 @@ private:
DenseMap<Type, uint32_t> undefValIDMap;
/// Map from results of normal operations to their <id>s.
- DenseMap<Value *, uint32_t> valueIDMap;
+ DenseMap<ValuePtr, uint32_t> valueIDMap;
/// Map from extended instruction set name to <id>s.
llvm::StringMap<uint32_t> extendedInstSetIDMap;
@@ -457,7 +457,7 @@ private:
/// placed inside `functions`) here. And then after emitting all blocks, we
/// replace the dummy <id> 0 with the real result <id> by overwriting
/// `functions[offset]`.
- DenseMap<Value *, SmallVector<size_t, 1>> deferredPhiValues;
+ DenseMap<ValuePtr, SmallVector<size_t, 1>> deferredPhiValues;
};
} // namespace
@@ -513,12 +513,12 @@ void Serializer::collect(SmallVectorImpl<uint32_t> &binary) {
void Serializer::printValueIDMap(raw_ostream &os) {
os << "\n= Value <id> Map =\n\n";
for (auto valueIDPair : valueIDMap) {
- Value *val = valueIDPair.first;
+ ValuePtr val = valueIDPair.first;
os << " " << val << " "
<< "id = " << valueIDPair.second << ' ';
if (auto *op = val->getDefiningOp()) {
os << "from op '" << op->getName() << "'";
- } else if (auto *arg = dyn_cast<BlockArgument>(val)) {
+ } else if (auto arg = dyn_cast<BlockArgument>(val)) {
Block *block = arg->getOwner();
os << "from argument of block " << block << ' ';
os << " in op '" << block->getParentOp()->getName() << "'";
@@ -752,7 +752,7 @@ LogicalResult Serializer::processFuncOp(FuncOp op) {
   // There might be OpPhi instructions whose value references still need fixing.
for (auto deferredValue : deferredPhiValues) {
- Value *value = deferredValue.first;
+ ValuePtr value = deferredValue.first;
uint32_t id = getValueID(value);
LLVM_DEBUG(llvm::dbgs() << "[phi] fix reference of value " << value
<< " to id = " << id << '\n');
@@ -1402,7 +1402,7 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) {
// Then create OpPhi instruction for each of the block argument.
for (auto argIndex : llvm::seq<unsigned>(0, block->getNumArguments())) {
- BlockArgument *arg = block->getArgument(argIndex);
+ BlockArgumentPtr arg = block->getArgument(argIndex);
// Get the type <id> and result <id> for this OpPhi instruction.
uint32_t phiTypeID = 0;
@@ -1418,7 +1418,7 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) {
phiArgs.push_back(phiID);
for (auto predIndex : llvm::seq<unsigned>(0, predecessors.size())) {
- Value *value = *(predecessors[predIndex].second + argIndex);
+ ValuePtr value = *(predecessors[predIndex].second + argIndex);
uint32_t predBlockId = getOrCreateBlockID(predecessors[predIndex].first);
LLVM_DEBUG(llvm::dbgs() << "[phi] use predecessor (id = " << predBlockId
<< ") value " << value << ' ');
@@ -1784,7 +1784,7 @@ Serializer::processOp<spirv::FunctionCallOp>(spirv::FunctionCallOp op) {
auto funcCallID = getNextID();
SmallVector<uint32_t, 8> operands{resTypeID, funcCallID, funcID};
- for (auto *value : op.arguments()) {
+ for (auto value : op.arguments()) {
auto valueID = getValueID(value);
assert(valueID && "cannot find a value for spv.FunctionCall");
operands.push_back(valueID);
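
The serializer keeps mapping SSA values to result <id>s through DenseMap; only the key type is respelled. Pointer keys need no extra support, since llvm::DenseMapInfo is already specialized for `T *`. A small sketch of the lookup pattern used by `getValueID` (the `ValuePtr` alias is assumed, as above):

#include "llvm/ADT/DenseMap.h"
#include <cstdint>

struct Value {};
using ValuePtr = Value *; // assumed transitional alias

// DenseMap<ValuePtr, uint32_t> works out of the box because DenseMapInfo is
// specialized for pointer types; lookup() returns a value-initialized 0 when
// the value has not been assigned an <id> yet, matching getValueID above.
uint32_t getId(const llvm::DenseMap<ValuePtr, uint32_t> &valueIDMap,
               ValuePtr val) {
  return valueIDMap.lookup(val);
}
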
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index d48b31fe491..93ce2c0a0d5 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -140,7 +140,7 @@ class FuncOpLowering final : public SPIRVOpLowering<FuncOp> {
public:
using SPIRVOpLowering<FuncOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(FuncOp funcOp, ArrayRef<Value *> operands,
+ matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -153,7 +153,7 @@ private:
} // namespace
PatternMatchResult
-FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef<Value *> operands,
+FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!funcOp.getAttrOfType<spirv::EntryPointABIAttr>(
spirv::getEntryPointABIAttrName())) {
@@ -183,7 +183,7 @@ FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef<Value *> operands,
OpBuilder::InsertionGuard funcInsertionGuard(rewriter);
rewriter.setInsertionPointToStart(&funcOp.front());
// Insert spirv::AddressOf and spirv::AccessChain operations.
- Value *replacement =
+ ValuePtr replacement =
rewriter.create<spirv::AddressOfOp>(funcOp.getLoc(), var);
// Check if the arg is a scalar or vector type. In that case, the value
// needs to be loaded into registers.
diff --git a/mlir/lib/Dialect/StandardOps/Ops.cpp b/mlir/lib/Dialect/StandardOps/Ops.cpp
index 4116f6f14ae..94166b5a7dd 100644
--- a/mlir/lib/Dialect/StandardOps/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/Ops.cpp
@@ -81,7 +81,7 @@ struct StdInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "std.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -184,7 +184,7 @@ void mlir::printDimAndSymbolList(Operation::operand_iterator begin,
// dimension operands parsed.
// Returns 'false' on success and 'true' on error.
ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser,
- SmallVectorImpl<Value *> &operands,
+ SmallVectorImpl<ValuePtr> &operands,
unsigned &numDims) {
SmallVector<OpAsmParser::OperandType, 8> opInfos;
if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren))
@@ -325,7 +325,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocOp> {
PatternRewriter &rewriter) const override {
     // Check to see if any dimension operands are constants.  If so, we can
// substitute and drop them.
- if (llvm::none_of(alloc.getOperands(), [](Value *operand) {
+ if (llvm::none_of(alloc.getOperands(), [](ValuePtr operand) {
return matchPattern(operand, m_ConstantIndex());
}))
return matchFailure();
@@ -336,8 +336,8 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocOp> {
// and keep track of the resultant memref type to build.
SmallVector<int64_t, 4> newShapeConstants;
newShapeConstants.reserve(memrefType.getRank());
- SmallVector<Value *, 4> newOperands;
- SmallVector<Value *, 4> droppedOperands;
+ SmallVector<ValuePtr, 4> newOperands;
+ SmallVector<ValuePtr, 4> droppedOperands;
unsigned dynamicDimPos = 0;
for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
@@ -429,7 +429,7 @@ struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern<BranchOp> {
static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &result) {
Block *dest;
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
if (parser.parseSuccessorAndUseList(dest, destOperands))
return failure();
result.addSuccessor(dest, destOperands);
@@ -623,7 +623,7 @@ static Type getI1SameShape(Builder *build, Type type) {
//===----------------------------------------------------------------------===//
static void buildCmpIOp(Builder *build, OperationState &result,
- CmpIPredicate predicate, Value *lhs, Value *rhs) {
+ CmpIPredicate predicate, ValuePtr lhs, ValuePtr rhs) {
result.addOperands({lhs, rhs});
result.types.push_back(getI1SameShape(build, lhs->getType()));
result.addAttribute(
@@ -777,7 +777,7 @@ CmpFPredicate CmpFOp::getPredicateByName(StringRef name) {
}
static void buildCmpFOp(Builder *build, OperationState &result,
- CmpFPredicate predicate, Value *lhs, Value *rhs) {
+ CmpFPredicate predicate, ValuePtr lhs, ValuePtr rhs) {
result.addOperands({lhs, rhs});
result.types.push_back(getI1SameShape(build, lhs->getType()));
result.addAttribute(
@@ -946,7 +946,7 @@ struct SimplifyConstCondBranchPred : public OpRewritePattern<CondBranchOp> {
static ParseResult parseCondBranchOp(OpAsmParser &parser,
OperationState &result) {
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
Block *dest;
OpAsmParser::OperandType condInfo;
@@ -1088,7 +1088,7 @@ OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) {
}
void ConstantOp::getAsmResultNames(
- function_ref<void(Value *, StringRef)> setNameFn) {
+ function_ref<void(ValuePtr, StringRef)> setNameFn) {
Type type = getType();
if (auto intCst = getValue().dyn_cast<IntegerAttr>()) {
IntegerType intTy = type.dyn_cast<IntegerType>();
@@ -1183,7 +1183,7 @@ struct SimplifyDeadDealloc : public OpRewritePattern<DeallocOp> {
PatternMatchResult matchAndRewrite(DeallocOp dealloc,
PatternRewriter &rewriter) const override {
// Check that the memref operand's defining operation is an AllocOp.
- Value *memref = dealloc.memref();
+ ValuePtr memref = dealloc.memref();
if (!isa_and_nonnull<AllocOp>(memref->getDefiningOp()))
return matchFailure();
@@ -1362,11 +1362,11 @@ OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
// ---------------------------------------------------------------------------
void DmaStartOp::build(Builder *builder, OperationState &result,
- Value *srcMemRef, ValueRange srcIndices,
- Value *destMemRef, ValueRange destIndices,
- Value *numElements, Value *tagMemRef,
- ValueRange tagIndices, Value *stride,
- Value *elementsPerStride) {
+ ValuePtr srcMemRef, ValueRange srcIndices,
+ ValuePtr destMemRef, ValueRange destIndices,
+ ValuePtr numElements, ValuePtr tagMemRef,
+ ValueRange tagIndices, ValuePtr stride,
+ ValuePtr elementsPerStride) {
result.addOperands(srcMemRef);
result.addOperands(srcIndices);
result.addOperands(destMemRef);
@@ -1507,8 +1507,8 @@ LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
// ---------------------------------------------------------------------------
void DmaWaitOp::build(Builder *builder, OperationState &result,
- Value *tagMemRef, ValueRange tagIndices,
- Value *numElements) {
+ ValuePtr tagMemRef, ValueRange tagIndices,
+ ValuePtr numElements) {
result.addOperands(tagMemRef);
result.addOperands(tagIndices);
result.addOperands(numElements);
@@ -2025,7 +2025,7 @@ static LogicalResult verify(SelectOp op) {
}
OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
- auto *condition = getCondition();
+ auto condition = getCondition();
// select true, %0, %1 => %0
if (matchPattern(condition, m_One()))
@@ -2357,7 +2357,7 @@ static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
static void print(OpAsmPrinter &p, ViewOp op) {
p << op.getOperationName() << ' ' << *op.getOperand(0) << '[';
- auto *dynamicOffset = op.getDynamicOffset();
+ auto dynamicOffset = op.getDynamicOffset();
if (dynamicOffset != nullptr)
p.printOperand(dynamicOffset);
p << "][" << op.getDynamicSizes() << ']';
@@ -2365,7 +2365,7 @@ static void print(OpAsmPrinter &p, ViewOp op) {
p << " : " << op.getOperand(0)->getType() << " to " << op.getType();
}
-Value *ViewOp::getDynamicOffset() {
+ValuePtr ViewOp::getDynamicOffset() {
int64_t offset;
SmallVector<int64_t, 4> strides;
auto result =
@@ -2440,7 +2440,7 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
PatternMatchResult matchAndRewrite(ViewOp viewOp,
PatternRewriter &rewriter) const override {
// Return if none of the operands are constants.
- if (llvm::none_of(viewOp.getOperands(), [](Value *operand) {
+ if (llvm::none_of(viewOp.getOperands(), [](ValuePtr operand) {
return matchPattern(operand, m_ConstantIndex());
}))
return matchFailure();
@@ -2457,11 +2457,11 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
return matchFailure();
- SmallVector<Value *, 4> newOperands;
- SmallVector<Value *, 4> droppedOperands;
+ SmallVector<ValuePtr, 4> newOperands;
+ SmallVector<ValuePtr, 4> droppedOperands;
// Fold dynamic offset operand if it is produced by a constant.
- auto *dynamicOffset = viewOp.getDynamicOffset();
+ auto dynamicOffset = viewOp.getDynamicOffset();
int64_t newOffset = oldOffset;
unsigned dynamicOffsetOperandCount = 0;
if (dynamicOffset != nullptr) {
@@ -2576,7 +2576,7 @@ static Type inferSubViewResultType(MemRefType memRefType) {
memRefType.getMemorySpace());
}
-void mlir::SubViewOp::build(Builder *b, OperationState &result, Value *source,
+void mlir::SubViewOp::build(Builder *b, OperationState &result, ValuePtr source,
ValueRange offsets, ValueRange sizes,
ValueRange strides, Type resultType,
ArrayRef<NamedAttribute> attrs) {
@@ -2590,7 +2590,7 @@ void mlir::SubViewOp::build(Builder *b, OperationState &result, Value *source,
}
void mlir::SubViewOp::build(Builder *b, OperationState &result, Type resultType,
- Value *source) {
+ ValuePtr source) {
build(b, result, source, /*offsets=*/{}, /*sizes=*/{}, /*strides=*/{},
resultType);
}
@@ -2826,7 +2826,7 @@ public:
// Follow all or nothing approach for shapes for now. If all the operands
// for sizes are constants then fold it into the type of the result memref.
if (subViewType.hasStaticShape() ||
- llvm::any_of(subViewOp.sizes(), [](Value *operand) {
+ llvm::any_of(subViewOp.sizes(), [](ValuePtr operand) {
return !matchPattern(operand, m_ConstantIndex());
})) {
return matchFailure();
@@ -2842,7 +2842,7 @@ public:
subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
- ArrayRef<Value *>(), subViewOp.strides(), newMemRefType);
+ ArrayRef<ValuePtr>(), subViewOp.strides(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
subViewOp.sizes(), subViewOp, newSubViewOp, subViewOp.getType());
@@ -2871,7 +2871,7 @@ public:
failed(getStridesAndOffset(subViewType, resultStrides, resultOffset)) ||
llvm::is_contained(baseStrides,
MemRefType::getDynamicStrideOrOffset()) ||
- llvm::any_of(subViewOp.strides(), [](Value *stride) {
+ llvm::any_of(subViewOp.strides(), [](ValuePtr stride) {
return !matchPattern(stride, m_ConstantIndex());
})) {
return matchFailure();
@@ -2892,7 +2892,7 @@ public:
layoutMap, subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
- subViewOp.sizes(), ArrayRef<Value *>(), newMemRefType);
+ subViewOp.sizes(), ArrayRef<ValuePtr>(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
subViewOp.strides(), subViewOp, newSubViewOp, subViewOp.getType());
@@ -2922,7 +2922,7 @@ public:
llvm::is_contained(baseStrides,
MemRefType::getDynamicStrideOrOffset()) ||
baseOffset == MemRefType::getDynamicStrideOrOffset() ||
- llvm::any_of(subViewOp.offsets(), [](Value *stride) {
+ llvm::any_of(subViewOp.offsets(), [](ValuePtr stride) {
return !matchPattern(stride, m_ConstantIndex());
})) {
return matchFailure();
@@ -2943,7 +2943,7 @@ public:
MemRefType::get(subViewType.getShape(), subViewType.getElementType(),
layoutMap, subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
- subViewOp.getLoc(), subViewOp.source(), ArrayRef<Value *>(),
+ subViewOp.getLoc(), subViewOp.source(), ArrayRef<ValuePtr>(),
subViewOp.sizes(), subViewOp.strides(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
diff --git a/mlir/lib/Dialect/VectorOps/VectorOps.cpp b/mlir/lib/Dialect/VectorOps/VectorOps.cpp
index 6a3ff74afcd..18c1714f403 100644
--- a/mlir/lib/Dialect/VectorOps/VectorOps.cpp
+++ b/mlir/lib/Dialect/VectorOps/VectorOps.cpp
@@ -72,7 +72,7 @@ ArrayAttr vector::getVectorSubscriptAttr(Builder &builder,
//===----------------------------------------------------------------------===//
void vector::ContractionOp::build(Builder *builder, OperationState &result,
- Value *lhs, Value *rhs, Value *acc,
+ ValuePtr lhs, ValuePtr rhs, ValuePtr acc,
ArrayAttr indexingMaps,
ArrayAttr iteratorTypes) {
result.addOperands({lhs, rhs, acc});
@@ -404,7 +404,7 @@ static Type inferExtractOpResultType(VectorType vectorType,
}
void vector::ExtractOp::build(Builder *builder, OperationState &result,
- Value *source, ArrayRef<int64_t> position) {
+ ValuePtr source, ArrayRef<int64_t> position) {
result.addOperands(source);
auto positionAttr = getVectorSubscriptAttr(*builder, position);
result.addTypes(inferExtractOpResultType(source->getType().cast<VectorType>(),
@@ -471,7 +471,7 @@ static LogicalResult verify(vector::ExtractOp op) {
//===----------------------------------------------------------------------===//
void ExtractSlicesOp::build(Builder *builder, OperationState &result,
- TupleType tupleType, Value *vector,
+ TupleType tupleType, ValuePtr vector,
ArrayRef<int64_t> sizes,
ArrayRef<int64_t> strides) {
result.addOperands(vector);
@@ -647,8 +647,8 @@ static ParseResult parseBroadcastOp(OpAsmParser &parser,
// ShuffleOp
//===----------------------------------------------------------------------===//
-void ShuffleOp::build(Builder *builder, OperationState &result, Value *v1,
- Value *v2, ArrayRef<int64_t> mask) {
+void ShuffleOp::build(Builder *builder, OperationState &result, ValuePtr v1,
+ ValuePtr v2, ArrayRef<int64_t> mask) {
result.addOperands({v1, v2});
auto maskAttr = getVectorSubscriptAttr(*builder, mask);
result.addTypes(v1->getType());
@@ -771,8 +771,8 @@ static LogicalResult verify(InsertElementOp op) {
// InsertOp
//===----------------------------------------------------------------------===//
-void InsertOp::build(Builder *builder, OperationState &result, Value *source,
- Value *dest, ArrayRef<int64_t> position) {
+void InsertOp::build(Builder *builder, OperationState &result, ValuePtr source,
+ ValuePtr dest, ArrayRef<int64_t> position) {
result.addOperands({source, dest});
auto positionAttr = getVectorSubscriptAttr(*builder, position);
result.addTypes(dest->getType());
@@ -893,7 +893,7 @@ void InsertSlicesOp::getStrides(SmallVectorImpl<int64_t> &results) {
//===----------------------------------------------------------------------===//
void InsertStridedSliceOp::build(Builder *builder, OperationState &result,
- Value *source, Value *dest,
+ ValuePtr source, ValuePtr dest,
ArrayRef<int64_t> offsets,
ArrayRef<int64_t> strides) {
result.addOperands({source, dest});
@@ -1201,17 +1201,17 @@ static LogicalResult verify(ReshapeOp op) {
   // If all shape operands are produced by constant ops, verify that the product
   // of dimensions for the input and output shapes match.
- auto isDefByConstant = [](Value *operand) {
+ auto isDefByConstant = [](ValuePtr operand) {
return isa_and_nonnull<ConstantIndexOp>(operand->getDefiningOp());
};
if (llvm::all_of(op.input_shape(), isDefByConstant) &&
llvm::all_of(op.output_shape(), isDefByConstant)) {
int64_t numInputElements = 1;
- for (auto *operand : op.input_shape())
+ for (auto operand : op.input_shape())
numInputElements *=
cast<ConstantIndexOp>(operand->getDefiningOp()).getValue();
int64_t numOutputElements = 1;
- for (auto *operand : op.output_shape())
+ for (auto operand : op.output_shape())
numOutputElements *=
cast<ConstantIndexOp>(operand->getDefiningOp()).getValue();
if (numInputElements != numOutputElements)
@@ -1247,7 +1247,7 @@ static Type inferStridedSliceOpResultType(VectorType vectorType,
}
void StridedSliceOp::build(Builder *builder, OperationState &result,
- Value *source, ArrayRef<int64_t> offsets,
+ ValuePtr source, ArrayRef<int64_t> offsets,
ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides) {
result.addOperands(source);
auto offsetsAttr = getVectorSubscriptAttr(*builder, offsets);
@@ -1603,7 +1603,7 @@ static MemRefType inferVectorTypeCastResultType(MemRefType t) {
}
void TypeCastOp::build(Builder *builder, OperationState &result,
- Value *source) {
+ ValuePtr source) {
result.addOperands(source);
result.addTypes(
inferVectorTypeCastResultType(source->getType().cast<MemRefType>()));
@@ -1793,14 +1793,14 @@ public:
PatternMatchResult matchAndRewrite(CreateMaskOp createMaskOp,
PatternRewriter &rewriter) const override {
// Return if any of 'createMaskOp' operands are not defined by a constant.
- auto is_not_def_by_constant = [](Value *operand) {
+ auto is_not_def_by_constant = [](ValuePtr operand) {
return !isa_and_nonnull<ConstantIndexOp>(operand->getDefiningOp());
};
if (llvm::any_of(createMaskOp.operands(), is_not_def_by_constant))
return matchFailure();
// Gather constant mask dimension sizes.
SmallVector<int64_t, 4> maskDimSizes;
- for (auto *operand : createMaskOp.operands()) {
+ for (auto operand : createMaskOp.operands()) {
auto defOp = operand->getDefiningOp();
maskDimSizes.push_back(cast<ConstantIndexOp>(defOp).getValue());
}
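
In the VectorTransforms changes that follow, locals such as the splat constant in makeSplatZero are still initialized to nullptr, and the unrolled-slice cache compares its entries against nullptr, so the respelled type has to keep a usable null state. A short sketch of that lazy-cache sentinel pattern under the assumed raw-pointer alias:

#include <vector>

struct Value {};
using ValuePtr = Value *; // assumed transitional alias

// Mirrors the slice-cache check in getOrCreateUnrolledVectorSlice below:
// entries default to nullptr and are filled in lazily, so nullptr stays the
// "not yet materialized" sentinel after the Value * -> ValuePtr respelling.
ValuePtr getOrCreate(std::vector<ValuePtr> &cache, size_t index,
                     ValuePtr (*create)()) {
  ValuePtr slice = cache[index];
  if (slice == nullptr) {
    slice = create();
    cache[index] = slice;
  }
  return slice;
}
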
diff --git a/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp b/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp
index 64cacb28720..e5c281cbf64 100644
--- a/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp
@@ -106,17 +106,17 @@ static SmallVector<int64_t, 8> delinearize(int64_t linearIndex,
// `resultTypes`.
static Operation *cloneOpWithOperandsAndTypes(PatternRewriter &builder,
Location loc, Operation *op,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ArrayRef<Type> resultTypes) {
OperationState res(loc, op->getName().getStringRef(), operands, resultTypes,
op->getAttrs());
return builder.createOperation(res);
}
-static Value *makeSplatZero(Location loc, PatternRewriter &rewriter,
- VectorType vt) {
+static ValuePtr makeSplatZero(Location loc, PatternRewriter &rewriter,
+ VectorType vt) {
auto t = vt.getElementType();
- Value *f = nullptr;
+ ValuePtr f = nullptr;
if (t.isBF16() || t.isF16())
f = rewriter.create<ConstantOp>(loc, t, rewriter.getF64FloatAttr(0.0f));
else if (t.isF32())
@@ -190,12 +190,12 @@ struct UnrolledVectorState {
SmallVector<int64_t, 4> unrollFactors;
SmallVector<int64_t, 8> basis;
int64_t numInstances;
- Value *slicesTuple;
+ ValuePtr slicesTuple;
};
// Populates 'state' with unrolled shape, unroll factors, basis and
// num unrolled instances for 'vectorType'.
-static void initUnrolledVectorState(VectorType vectorType, Value *initValue,
+static void initUnrolledVectorState(VectorType vectorType, ValuePtr initValue,
const DenseMap<int64_t, int64_t> &indexMap,
ArrayRef<int64_t> targetShape,
UnrolledVectorState &state,
@@ -239,10 +239,10 @@ getUnrolledVectorLinearIndex(UnrolledVectorState &state,
// Returns an unrolled vector at 'vectorOffsets' within the vector
// represented by 'state'. The vector is created from a slice of 'initValue'
// if not present in 'cache'.
-static Value *getOrCreateUnrolledVectorSlice(
+static ValuePtr getOrCreateUnrolledVectorSlice(
Location loc, UnrolledVectorState &state, ArrayRef<int64_t> vectorOffsets,
ArrayRef<int64_t> offsets, DenseMap<int64_t, int64_t> &indexMap,
- Value *initValue, SmallVectorImpl<Value *> &cache,
+ ValuePtr initValue, SmallVectorImpl<ValuePtr> &cache,
PatternRewriter &builder) {
// Compute slice offsets.
SmallVector<int64_t, 4> sliceOffsets(state.unrolledShape.size());
@@ -253,7 +253,7 @@ static Value *getOrCreateUnrolledVectorSlice(
int64_t sliceLinearIndex =
getUnrolledVectorLinearIndex(state, vectorOffsets, indexMap);
assert(sliceLinearIndex < static_cast<int64_t>(cache.size()));
- auto *valueSlice = cache[sliceLinearIndex];
+ auto valueSlice = cache[sliceLinearIndex];
if (valueSlice == nullptr) {
// Return tuple element at 'sliceLinearIndex'.
auto tupleIndex = builder.getI64IntegerAttr(sliceLinearIndex);
@@ -330,12 +330,10 @@ struct VectorState {
// TODO(andydavis) Generalize this to support structured ops beyond
// vector ContractionOp, and merge it with 'unrollSingleResultOpMatchingType'
-static Value *unrollSingleResultStructuredOp(Operation *op,
- ArrayRef<int64_t> iterationBounds,
- std::vector<VectorState> &vectors,
- unsigned resultIndex,
- ArrayRef<int64_t> targetShape,
- PatternRewriter &builder) {
+static ValuePtr unrollSingleResultStructuredOp(
+ Operation *op, ArrayRef<int64_t> iterationBounds,
+ std::vector<VectorState> &vectors, unsigned resultIndex,
+ ArrayRef<int64_t> targetShape, PatternRewriter &builder) {
auto shapedType = op->getResult(0)->getType().dyn_cast_or_null<ShapedType>();
if (!shapedType || !shapedType.hasStaticShape())
assert(false && "Expected a statically shaped result type");
@@ -351,7 +349,7 @@ static Value *unrollSingleResultStructuredOp(Operation *op,
SmallVector<UnrolledVectorState, 3> unrolledVectorState(numVectors);
for (unsigned i = 0; i < numVectors; ++i) {
int64_t operandIndex = vectors[i].operandIndex;
- auto *operand = operandIndex >= 0 ? op->getOperand(operandIndex) : nullptr;
+ auto operand = operandIndex >= 0 ? op->getOperand(operandIndex) : nullptr;
initUnrolledVectorState(vectors[i].type, operand, vectors[i].indexMap,
targetShape, unrolledVectorState[i], builder);
}
@@ -364,7 +362,7 @@ static Value *unrollSingleResultStructuredOp(Operation *op,
shapedType.getElementType());
// Initialize caches for intermediate vector results.
- std::vector<SmallVector<Value *, 4>> caches(numVectors);
+ std::vector<SmallVector<ValuePtr, 4>> caches(numVectors);
for (unsigned i = 0; i < numVectors; ++i)
caches[i].resize(unrolledVectorState[i].numInstances);
@@ -376,13 +374,13 @@ static Value *unrollSingleResultStructuredOp(Operation *op,
auto offsets = zipMap([](int64_t v1, int64_t v2) { return v1 * v2; },
vectorOffsets, targetShape);
// Get cached slice (or create slice) for each operand at 'offsets'.
- SmallVector<Value *, 3> operands;
+ SmallVector<ValuePtr, 3> operands;
operands.resize(op->getNumOperands());
for (unsigned i = 0; i < numVectors; ++i) {
int64_t operandIndex = vectors[i].operandIndex;
if (operandIndex < 0)
continue; // Output
- auto *operand = op->getOperand(operandIndex);
+ auto operand = op->getOperand(operandIndex);
operands[operandIndex] = getOrCreateUnrolledVectorSlice(
op->getLoc(), unrolledVectorState[i], vectorOffsets, offsets,
vectors[i].indexMap, operand, caches[i], builder);
@@ -402,21 +400,21 @@ static Value *unrollSingleResultStructuredOp(Operation *op,
// Create TupleOp of unrolled result vectors.
SmallVector<Type, 4> vectorTupleTypes(resultValueState.numInstances);
- SmallVector<Value *, 4> vectorTupleValues(resultValueState.numInstances);
+ SmallVector<ValuePtr, 4> vectorTupleValues(resultValueState.numInstances);
for (unsigned i = 0; i < resultValueState.numInstances; ++i) {
vectorTupleTypes[i] = caches[resultIndex][i]->getType().cast<VectorType>();
vectorTupleValues[i] = caches[resultIndex][i];
}
TupleType tupleType = builder.getTupleType(vectorTupleTypes);
- Value *tupleOp = builder.create<vector::TupleOp>(op->getLoc(), tupleType,
- vectorTupleValues);
+ ValuePtr tupleOp = builder.create<vector::TupleOp>(op->getLoc(), tupleType,
+ vectorTupleValues);
// Create InsertSlicesOp(Tuple(result_vectors)).
auto resultVectorType = op->getResult(0)->getType().cast<VectorType>();
SmallVector<int64_t, 4> sizes(resultValueState.unrolledShape);
SmallVector<int64_t, 4> strides(resultValueState.unrollFactors.size(), 1);
- Value *insertSlicesOp = builder.create<vector::InsertSlicesOp>(
+ ValuePtr insertSlicesOp = builder.create<vector::InsertSlicesOp>(
op->getLoc(), resultVectorType, tupleOp, builder.getI64ArrayAttr(sizes),
builder.getI64ArrayAttr(strides));
return insertSlicesOp;
@@ -487,7 +485,7 @@ getVectorElementwiseOpUnrollState(Operation *op, ArrayRef<int64_t> targetShape,
}
// Entry point for unrolling declarative pattern rewrites.
-Value *mlir::vector::unrollSingleResultOpMatchingType(
+ValuePtr mlir::vector::unrollSingleResultOpMatchingType(
PatternRewriter &builder, Operation *op, ArrayRef<int64_t> targetShape) {
assert(op->getNumResults() == 1 && "Expected single result operation");
@@ -516,8 +514,8 @@ Value *mlir::vector::unrollSingleResultOpMatchingType(
static void
generateTransferOpSlices(VectorType vectorType, TupleType tupleType,
ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides,
- ArrayRef<Value *> indices, PatternRewriter &rewriter,
- function_ref<void(unsigned, ArrayRef<Value *>)> fn) {
+ ArrayRef<ValuePtr> indices, PatternRewriter &rewriter,
+ function_ref<void(unsigned, ArrayRef<ValuePtr>)> fn) {
// Compute strides w.r.t. to slice counts in each dimension.
auto maybeDimSliceCounts = shapeRatio(vectorType.getShape(), sizes);
assert(maybeDimSliceCounts.hasValue());
@@ -534,13 +532,13 @@ generateTransferOpSlices(VectorType vectorType, TupleType tupleType,
auto offsets = zipMap([](int64_t v1, int64_t v2) { return v1 * v2; },
vectorOffsets, sizes);
// Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'.
- SmallVector<Value *, 4> sliceIndices(numSliceIndices);
+ SmallVector<ValuePtr, 4> sliceIndices(numSliceIndices);
for (auto it : llvm::enumerate(indices)) {
auto expr = getAffineDimExpr(0, ctx) +
getAffineConstantExpr(offsets[it.index()], ctx);
auto map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, expr);
sliceIndices[it.index()] = rewriter.create<AffineApplyOp>(
- it.value()->getLoc(), map, ArrayRef<Value *>(it.value()));
+ it.value()->getLoc(), map, ArrayRef<ValuePtr>(it.value()));
}
// Call 'fn' to generate slice 'i' at 'sliceIndices'.
fn(i, sliceIndices);
@@ -559,7 +557,7 @@ struct SplitTransferReadOp : public OpRewritePattern<vector::TransferReadOp> {
if (!xferReadOp.permutation_map().isIdentity())
return matchFailure();
// Return unless the unique 'xferReadOp' user is an ExtractSlicesOp.
- Value *xferReadResult = xferReadOp.getResult();
+ ValuePtr xferReadResult = xferReadOp.getResult();
auto extractSlicesOp =
dyn_cast<vector::ExtractSlicesOp>(*xferReadResult->getUsers().begin());
if (!xferReadResult->hasOneUse() || !extractSlicesOp)
@@ -576,10 +574,10 @@ struct SplitTransferReadOp : public OpRewritePattern<vector::TransferReadOp> {
Location loc = xferReadOp.getLoc();
int64_t numSlices = resultTupleType.size();
- SmallVector<Value *, 4> vectorTupleValues(numSlices);
- SmallVector<Value *, 4> indices(xferReadOp.indices().begin(),
- xferReadOp.indices().end());
- auto createSlice = [&](unsigned index, ArrayRef<Value *> sliceIndices) {
+ SmallVector<ValuePtr, 4> vectorTupleValues(numSlices);
+ SmallVector<ValuePtr, 4> indices(xferReadOp.indices().begin(),
+ xferReadOp.indices().end());
+ auto createSlice = [&](unsigned index, ArrayRef<ValuePtr> sliceIndices) {
// Get VectorType for slice 'i'.
auto sliceVectorType = resultTupleType.getType(index);
// Create split TransferReadOp for 'sliceUser'.
@@ -591,8 +589,8 @@ struct SplitTransferReadOp : public OpRewritePattern<vector::TransferReadOp> {
indices, rewriter, createSlice);
// Create tuple of splice xfer read operations.
- Value *tupleOp = rewriter.create<vector::TupleOp>(loc, resultTupleType,
- vectorTupleValues);
+ ValuePtr tupleOp = rewriter.create<vector::TupleOp>(loc, resultTupleType,
+ vectorTupleValues);
// Replace 'xferReadOp' with result 'insertSlicesResult'.
rewriter.replaceOpWithNewOp<vector::InsertSlicesOp>(
xferReadOp, sourceVectorType, tupleOp, extractSlicesOp.sizes(),
@@ -632,9 +630,9 @@ struct SplitTransferWriteOp : public OpRewritePattern<vector::TransferWriteOp> {
insertSlicesOp.getStrides(strides);
Location loc = xferWriteOp.getLoc();
- SmallVector<Value *, 4> indices(xferWriteOp.indices().begin(),
- xferWriteOp.indices().end());
- auto createSlice = [&](unsigned index, ArrayRef<Value *> sliceIndices) {
+ SmallVector<ValuePtr, 4> indices(xferWriteOp.indices().begin(),
+ xferWriteOp.indices().end());
+ auto createSlice = [&](unsigned index, ArrayRef<ValuePtr> sliceIndices) {
// Create split TransferWriteOp for source vector 'tupleOp.operand[i]'.
rewriter.create<vector::TransferWriteOp>(
loc, tupleOp.getOperand(index), xferWriteOp.memref(), sliceIndices,
@@ -676,7 +674,7 @@ struct TupleGetFolderOp : public OpRewritePattern<vector::TupleGetOp> {
return matchFailure();
// Forward Value from 'tupleOp' at 'tupleGetOp.index'.
- Value *tupleValue = tupleOp.getOperand(tupleGetOp.getIndex());
+ ValuePtr tupleValue = tupleOp.getOperand(tupleGetOp.getIndex());
rewriter.replaceOp(tupleGetOp, tupleValue);
return matchSuccess();
}
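The hunks above are representative of the entire patch: every spelled-out Value * in signatures, container element types (SmallVector, ArrayRef, DenseMap) and locals becomes the ValuePtr alias, and auto * bindings become plain auto so they keep compiling once the alias is no longer a raw pointer. The alias definitions themselves are not part of this excerpt; a minimal sketch of what they are presumed to be during the transition, not quoted from the patch:

    // Presumed transitional aliases (hypothetical sketch; the actual
    // definitions would live alongside Value, BlockArgument and OpResult).
    using ValuePtr = Value *;
    using ValueRef = Value &;
    using BlockArgumentPtr = BlockArgument *;
    using OpResultPtr = OpResult *;

Under these aliases the rewrite is purely textual and behavior-preserving; it only removes the hard-coded assumption that a Value is reached through a raw pointer.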
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 47e2dfed55e..35108ed5666 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -88,9 +88,8 @@ ValueHandle &mlir::edsc::ValueHandle::operator=(const ValueHandle &other) {
return *this;
}
-ValueHandle
-mlir::edsc::ValueHandle::createComposedAffineApply(AffineMap map,
- ArrayRef<Value *> operands) {
+ValueHandle mlir::edsc::ValueHandle::createComposedAffineApply(
+ AffineMap map, ArrayRef<ValuePtr> operands) {
Operation *op =
makeComposedAffineApply(ScopedContext::getBuilder(),
ScopedContext::getLocation(), map, operands)
@@ -118,7 +117,7 @@ OperationHandle OperationHandle::create(StringRef name,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes) {
OperationState state(ScopedContext::getLocation(), name);
- SmallVector<Value *, 4> ops(operands.begin(), operands.end());
+ SmallVector<ValuePtr, 4> ops(operands.begin(), operands.end());
state.addOperands(ops);
state.addTypes(resultTypes);
for (const auto &attr : attributes) {
@@ -169,8 +168,8 @@ mlir::edsc::LoopBuilder mlir::edsc::LoopBuilder::makeAffine(
if (auto staticFor = emitStaticFor(lbHandles, ubHandles, step)) {
*iv = staticFor.getValue();
} else {
- SmallVector<Value *, 4> lbs(lbHandles.begin(), lbHandles.end());
- SmallVector<Value *, 4> ubs(ubHandles.begin(), ubHandles.end());
+ SmallVector<ValuePtr, 4> lbs(lbHandles.begin(), lbHandles.end());
+ SmallVector<ValuePtr, 4> ubs(ubHandles.begin(), ubHandles.end());
*iv = ValueHandle::create<AffineForOp>(
lbs, ScopedContext::getBuilder().getMultiDimIdentityMap(lbs.size()),
ubs, ScopedContext::getBuilder().getMultiDimIdentityMap(ubs.size()),
@@ -309,11 +308,11 @@ static ValueHandle createBinaryHandle(ValueHandle lhs, ValueHandle rhs) {
return ValueHandle::create<Op>(lhs.getValue(), rhs.getValue());
}
-static std::pair<AffineExpr, Value *>
-categorizeValueByAffineType(MLIRContext *context, Value *val, unsigned &numDims,
- unsigned &numSymbols) {
+static std::pair<AffineExpr, ValuePtr>
+categorizeValueByAffineType(MLIRContext *context, ValuePtr val,
+ unsigned &numDims, unsigned &numSymbols) {
AffineExpr d;
- Value *resultVal = nullptr;
+ ValuePtr resultVal = nullptr;
if (auto constant = dyn_cast_or_null<ConstantIndexOp>(val->getDefiningOp())) {
d = getAffineConstantExpr(constant.getValue(), context);
} else if (isValidSymbol(val) && !isValidDim(val)) {
@@ -332,12 +331,12 @@ static ValueHandle createBinaryIndexHandle(
MLIRContext *context = ScopedContext::getContext();
unsigned numDims = 0, numSymbols = 0;
AffineExpr d0, d1;
- Value *v0, *v1;
+ ValuePtr v0, v1;
std::tie(d0, v0) =
categorizeValueByAffineType(context, lhs.getValue(), numDims, numSymbols);
std::tie(d1, v1) =
categorizeValueByAffineType(context, rhs.getValue(), numDims, numSymbols);
- SmallVector<Value *, 2> operands;
+ SmallVector<ValuePtr, 2> operands;
if (v0) {
operands.push_back(v0);
}
diff --git a/mlir/lib/EDSC/Helpers.cpp b/mlir/lib/EDSC/Helpers.cpp
index eeb28668a34..1771eb0a427 100644
--- a/mlir/lib/EDSC/Helpers.cpp
+++ b/mlir/lib/EDSC/Helpers.cpp
@@ -22,7 +22,7 @@
using namespace mlir;
using namespace mlir::edsc;
-static SmallVector<ValueHandle, 8> getMemRefSizes(Value *memRef) {
+static SmallVector<ValueHandle, 8> getMemRefSizes(ValuePtr memRef) {
MemRefType memRefType = memRef->getType().cast<MemRefType>();
assert(isStrided(memRefType) && "Expected strided MemRef type");
@@ -39,7 +39,7 @@ static SmallVector<ValueHandle, 8> getMemRefSizes(Value *memRef) {
return res;
}
-mlir::edsc::MemRefView::MemRefView(Value *v) : base(v) {
+mlir::edsc::MemRefView::MemRefView(ValuePtr v) : base(v) {
assert(v->getType().isa<MemRefType>() && "MemRefType expected");
auto memrefSizeValues = getMemRefSizes(v);
@@ -50,7 +50,7 @@ mlir::edsc::MemRefView::MemRefView(Value *v) : base(v) {
}
}
-mlir::edsc::VectorView::VectorView(Value *v) : base(v) {
+mlir::edsc::VectorView::VectorView(ValuePtr v) : base(v) {
auto vectorType = v->getType().cast<VectorType>();
for (auto s : vectorType.getShape()) {
diff --git a/mlir/lib/EDSC/Intrinsics.cpp b/mlir/lib/EDSC/Intrinsics.cpp
index 1b19f9aa0bf..c6738c42993 100644
--- a/mlir/lib/EDSC/Intrinsics.cpp
+++ b/mlir/lib/EDSC/Intrinsics.cpp
@@ -29,7 +29,7 @@ OperationHandle mlir::edsc::intrinsics::br(BlockHandle bh,
(void)o;
assert(o && "Expected already captured ValueHandle");
}
- SmallVector<Value *, 4> ops(operands.begin(), operands.end());
+ SmallVector<ValuePtr, 4> ops(operands.begin(), operands.end());
return OperationHandle::create<BranchOp>(bh.getBlock(), ops);
}
static void enforceEmptyCapturesMatchOperands(ArrayRef<ValueHandle *> captures,
@@ -52,7 +52,7 @@ OperationHandle mlir::edsc::intrinsics::br(BlockHandle *bh,
assert(!*bh && "Unexpected already captured BlockHandle");
enforceEmptyCapturesMatchOperands(captures, operands);
BlockBuilder(bh, captures)(/* no body */);
- SmallVector<Value *, 4> ops(operands.begin(), operands.end());
+ SmallVector<ValuePtr, 4> ops(operands.begin(), operands.end());
return OperationHandle::create<BranchOp>(bh->getBlock(), ops);
}
@@ -61,8 +61,8 @@ mlir::edsc::intrinsics::cond_br(ValueHandle cond, BlockHandle trueBranch,
ArrayRef<ValueHandle> trueOperands,
BlockHandle falseBranch,
ArrayRef<ValueHandle> falseOperands) {
- SmallVector<Value *, 4> trueOps(trueOperands.begin(), trueOperands.end());
- SmallVector<Value *, 4> falseOps(falseOperands.begin(), falseOperands.end());
+ SmallVector<ValuePtr, 4> trueOps(trueOperands.begin(), trueOperands.end());
+ SmallVector<ValuePtr, 4> falseOps(falseOperands.begin(), falseOperands.end());
return OperationHandle::create<CondBranchOp>(
cond, trueBranch.getBlock(), trueOps, falseBranch.getBlock(), falseOps);
}
@@ -78,8 +78,8 @@ OperationHandle mlir::edsc::intrinsics::cond_br(
enforceEmptyCapturesMatchOperands(falseCaptures, falseOperands);
BlockBuilder(trueBranch, trueCaptures)(/* no body */);
BlockBuilder(falseBranch, falseCaptures)(/* no body */);
- SmallVector<Value *, 4> trueOps(trueOperands.begin(), trueOperands.end());
- SmallVector<Value *, 4> falseOps(falseOperands.begin(), falseOperands.end());
+ SmallVector<ValuePtr, 4> trueOps(trueOperands.begin(), trueOperands.end());
+ SmallVector<ValuePtr, 4> falseOps(falseOperands.begin(), falseOperands.end());
return OperationHandle::create<CondBranchOp>(
cond, trueBranch->getBlock(), trueOps, falseBranch->getBlock(), falseOps);
}
diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index f3c92ada0a0..177d8a5ef05 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -319,7 +319,7 @@ void ModuleState::visitOperation(Operation *op) {
visitType(type);
for (auto &region : op->getRegions())
for (auto &block : region)
- for (auto *arg : block.getArguments())
+ for (auto arg : block.getArguments())
visitType(arg->getType());
// Visit each of the attributes.
@@ -1437,7 +1437,7 @@ public:
void printAttribute(Attribute attr) override {
ModulePrinter::printAttribute(attr);
}
- void printOperand(Value *value) override { printValueID(value); }
+ void printOperand(ValuePtr value) override { printValueID(value); }
void printOptionalAttrDict(ArrayRef<NamedAttribute> attrs,
ArrayRef<StringRef> elidedAttrs = {}) override {
@@ -1519,7 +1519,7 @@ protected:
void numberValuesInRegion(Region &region);
void numberValuesInBlock(Block &block);
void numberValuesInOp(Operation &op);
- void printValueID(Value *value, bool printResultNo = true) const {
+ void printValueID(ValuePtr value, bool printResultNo = true) const {
printValueIDImpl(value, printResultNo, os);
}
@@ -1528,13 +1528,13 @@ private:
/// 'lookupValue' and the result of 'result' within that group in
/// 'lookupResultNo'. 'lookupResultNo' is only filled in if the result group
/// has more than 1 result.
- void getResultIDAndNumber(OpResult *result, Value *&lookupValue,
+ void getResultIDAndNumber(OpResultPtr result, ValuePtr &lookupValue,
int &lookupResultNo) const;
- void printValueIDImpl(Value *value, bool printResultNo,
+ void printValueIDImpl(ValuePtr value, bool printResultNo,
raw_ostream &stream) const;
/// Set a special value name for the given value.
- void setValueName(Value *value, StringRef name);
+ void setValueName(ValuePtr value, StringRef name);
/// Uniques the given value name within the printer. If the given name
/// conflicts, it is automatically renamed.
@@ -1542,8 +1542,8 @@ private:
/// This is the value ID for each SSA value. If this returns ~0, then the
/// valueID has an entry in valueNames.
- DenseMap<Value *, unsigned> valueIDs;
- DenseMap<Value *, StringRef> valueNames;
+ DenseMap<ValuePtr, unsigned> valueIDs;
+ DenseMap<ValuePtr, StringRef> valueNames;
/// This is a map of operations that contain multiple named result groups,
/// i.e. there may be multiple names for the results of the operation. The key
@@ -1619,7 +1619,7 @@ void OperationPrinter::numberValuesInRegion(Region &region) {
}
void OperationPrinter::numberValuesInBlock(Block &block) {
- auto setArgNameFn = [&](Value *arg, StringRef name) {
+ auto setArgNameFn = [&](ValuePtr arg, StringRef name) {
assert(!valueIDs.count(arg) && "arg numbered multiple times");
assert(cast<BlockArgument>(arg)->getOwner() == &block &&
"arg not defined in 'block'");
@@ -1638,7 +1638,7 @@ void OperationPrinter::numberValuesInBlock(Block &block) {
// 'arg'.
SmallString<32> specialNameBuffer(isEntryBlock ? "arg" : "");
llvm::raw_svector_ostream specialName(specialNameBuffer);
- for (auto *arg : block.getArguments()) {
+ for (auto arg : block.getArguments()) {
if (valueIDs.count(arg))
continue;
if (isEntryBlock) {
@@ -1657,11 +1657,11 @@ void OperationPrinter::numberValuesInOp(Operation &op) {
unsigned numResults = op.getNumResults();
if (numResults == 0)
return;
- Value *resultBegin = op.getResult(0);
+ ValuePtr resultBegin = op.getResult(0);
// Function used to set the special result names for the operation.
SmallVector<int, 2> resultGroups(/*Size=*/1, /*Value=*/0);
- auto setResultNameFn = [&](Value *result, StringRef name) {
+ auto setResultNameFn = [&](ValuePtr result, StringRef name) {
assert(!valueIDs.count(result) && "result numbered multiple times");
assert(result->getDefiningOp() == &op && "result not defined by 'op'");
setValueName(result, name);
@@ -1690,7 +1690,7 @@ void OperationPrinter::numberValuesInOp(Operation &op) {
}
/// Set a special value name for the given value.
-void OperationPrinter::setValueName(Value *value, StringRef name) {
+void OperationPrinter::setValueName(ValuePtr value, StringRef name) {
// If the name is empty, the value uses the default numbering.
if (name.empty()) {
valueIDs[value] = nextValueID++;
@@ -1737,7 +1737,7 @@ void OperationPrinter::print(Block *block, bool printBlockArgs,
// Print the argument list if non-empty.
if (!block->args_empty()) {
os << '(';
- interleaveComma(block->getArguments(), [&](BlockArgument *arg) {
+ interleaveComma(block->getArguments(), [&](BlockArgumentPtr arg) {
printValueID(arg);
os << ": ";
printType(arg->getType());
@@ -1788,8 +1788,8 @@ void OperationPrinter::print(Operation *op) {
printTrailingLocation(op->getLoc());
}
-void OperationPrinter::getResultIDAndNumber(OpResult *result,
- Value *&lookupValue,
+void OperationPrinter::getResultIDAndNumber(OpResultPtr result,
+ ValuePtr &lookupValue,
int &lookupResultNo) const {
Operation *owner = result->getOwner();
if (owner->getNumResults() == 1)
@@ -1827,7 +1827,7 @@ void OperationPrinter::getResultIDAndNumber(OpResult *result,
lookupValue = owner->getResult(groupResultNo);
}
-void OperationPrinter::printValueIDImpl(Value *value, bool printResultNo,
+void OperationPrinter::printValueIDImpl(ValuePtr value, bool printResultNo,
raw_ostream &stream) const {
if (!value) {
stream << "<<NULL>>";
@@ -1840,7 +1840,7 @@ void OperationPrinter::printValueIDImpl(Value *value, bool printResultNo,
// If this is a reference to the result of a multi-result operation or
// operation, print out the # identifier and make sure to map our lookup
// to the first result of the operation.
- if (OpResult *result = dyn_cast<OpResult>(value))
+ if (OpResultPtr result = dyn_cast<OpResult>(value))
getResultIDAndNumber(result, lookupValue, resultNo);
auto it = valueIDs.find(lookupValue);
@@ -1875,11 +1875,11 @@ void OperationPrinter::shadowRegionArgs(Region &region, ValueRange namesToUse) {
SmallVector<char, 16> nameStr;
for (unsigned i = 0, e = namesToUse.size(); i != e; ++i) {
- auto *nameToUse = namesToUse[i];
+ auto nameToUse = namesToUse[i];
if (nameToUse == nullptr)
continue;
- auto *nameToReplace = region.front().getArgument(i);
+ auto nameToReplace = region.front().getArgument(i);
nameStr.clear();
llvm::raw_svector_ostream nameStream(nameStr);
@@ -1951,10 +1951,10 @@ void OperationPrinter::printGenericOp(Operation *op) {
for (unsigned i = 0; i < numSuccessors; ++i)
totalNumSuccessorOperands += op->getNumSuccessorOperands(i);
unsigned numProperOperands = op->getNumOperands() - totalNumSuccessorOperands;
- SmallVector<Value *, 8> properOperands(
+ SmallVector<ValuePtr, 8> properOperands(
op->operand_begin(), std::next(op->operand_begin(), numProperOperands));
- interleaveComma(properOperands, [&](Value *value) { printValueID(value); });
+ interleaveComma(properOperands, [&](ValuePtr value) { printValueID(value); });
os << ')';
@@ -1997,10 +1997,10 @@ void OperationPrinter::printSuccessorAndUseList(Operation *term,
os << '(';
interleaveComma(succOperands,
- [this](Value *operand) { printValueID(operand); });
+ [this](ValuePtr operand) { printValueID(operand); });
os << " : ";
interleaveComma(succOperands,
- [this](Value *operand) { printType(operand->getType()); });
+ [this](ValuePtr operand) { printType(operand->getType()); });
os << ')';
}
@@ -2072,7 +2072,7 @@ void Value::print(raw_ostream &os) {
if (auto *op = getDefiningOp())
return op->print(os);
// TODO: Improve this.
- assert(isa<BlockArgument>(*this));
+ assert(isa<BlockArgument>());
os << "<block argument>\n";
}
diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
index 4dac32ae0c0..894f9ba38d0 100644
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -98,7 +98,7 @@ void Block::dropAllReferences() {
}
void Block::dropAllDefinedValueUses() {
- for (auto *arg : getArguments())
+ for (auto arg : getArguments())
arg->dropAllUses();
for (auto &op : *this)
op.dropAllDefinedValueUses();
@@ -151,7 +151,7 @@ void Block::recomputeOpOrder() {
// Argument list management.
//===----------------------------------------------------------------------===//
-BlockArgument *Block::addArgument(Type type) {
+BlockArgumentPtr Block::addArgument(Type type) {
auto *arg = new BlockArgument(type, this);
arguments.push_back(arg);
return arg;
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
index 691b2ad99c4..733fcd13994 100644
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -343,7 +343,7 @@ Operation *OpBuilder::createOperation(const OperationState &state) {
/// 'results'. Returns success if the operation was folded, failure otherwise.
/// Note: This function does not erase the operation on a successful fold.
LogicalResult OpBuilder::tryFold(Operation *op,
- SmallVectorImpl<Value *> &results) {
+ SmallVectorImpl<ValuePtr> &results) {
results.reserve(op->getNumResults());
auto cleanupFailure = [&] {
results.assign(op->result_begin(), op->result_end());
@@ -374,7 +374,7 @@ LogicalResult OpBuilder::tryFold(Operation *op,
Dialect *dialect = op->getDialect();
for (auto &it : llvm::enumerate(foldResults)) {
// Normal values get pushed back directly.
- if (auto *value = it.value().dyn_cast<Value *>()) {
+ if (auto value = it.value().dyn_cast<ValuePtr>()) {
results.push_back(value);
continue;
}
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index 9df10791046..53399ce00a3 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -114,7 +114,7 @@ template <> unsigned BlockOperand::getOperandNumber() {
/// Create a new Operation with the specific fields.
Operation *Operation::create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ArrayRef<NamedAttribute> attributes,
ArrayRef<Block *> successors, unsigned numRegions,
bool resizableOperandList) {
@@ -134,7 +134,7 @@ Operation *Operation::create(const OperationState &state) {
/// Create a new Operation with the specific fields.
Operation *Operation::create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
NamedAttributeList attributes,
ArrayRef<Block *> successors, RegionRange regions,
bool resizableOperandList) {
@@ -151,7 +151,7 @@ Operation *Operation::create(Location location, OperationName name,
/// unnecessarily uniquing a list of attributes.
Operation *Operation::create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
NamedAttributeList attributes,
ArrayRef<Block *> successors, unsigned numRegions,
bool resizableOperandList) {
@@ -314,7 +314,7 @@ bool Operation::isProperAncestor(Operation *other) {
}
/// Replace any uses of 'from' with 'to' within this operation.
-void Operation::replaceUsesOfWith(Value *from, Value *to) {
+void Operation::replaceUsesOfWith(ValuePtr from, ValuePtr to) {
if (from == to)
return;
for (auto &operand : getOpOperands())
@@ -585,7 +585,7 @@ void Operation::dropAllDefinedValueUses() {
/// Return true if there are no users of any results of this operation.
bool Operation::use_empty() {
- for (auto *result : getResults())
+ for (auto result : getResults())
if (!result->use_empty())
return false;
return true;
@@ -672,14 +672,14 @@ InFlightDiagnostic Operation::emitOpError(const Twine &message) {
/// Operands are remapped using `mapper` (if present), and `mapper` is updated
/// to contain the results.
Operation *Operation::cloneWithoutRegions(BlockAndValueMapping &mapper) {
- SmallVector<Value *, 8> operands;
+ SmallVector<ValuePtr, 8> operands;
SmallVector<Block *, 2> successors;
operands.reserve(getNumOperands() + getNumSuccessors());
if (getNumSuccessors() == 0) {
// Non-branching operations can just add all the operands.
- for (auto *opValue : getOperands())
+ for (auto opValue : getOperands())
operands.push_back(mapper.lookupOrDefault(opValue));
} else {
// We add the operands separated by nullptr's for each successor.
@@ -699,7 +699,7 @@ Operation *Operation::cloneWithoutRegions(BlockAndValueMapping &mapper) {
operands.push_back(nullptr);
// Remap the successors operands.
- for (auto *operand : getSuccessorOperands(succ))
+ for (auto operand : getSuccessorOperands(succ))
operands.push_back(mapper.lookupOrDefault(operand));
}
}
@@ -1092,8 +1092,8 @@ LogicalResult OpTrait::impl::verifyResultSizeAttr(Operation *op,
// These functions are out-of-line implementations of the methods in BinaryOp,
// which avoids them being template instantiated/duplicated.
-void impl::buildBinaryOp(Builder *builder, OperationState &result, Value *lhs,
- Value *rhs) {
+void impl::buildBinaryOp(Builder *builder, OperationState &result, ValuePtr lhs,
+ ValuePtr rhs) {
assert(lhs->getType() == rhs->getType());
result.addOperands({lhs, rhs});
result.types.push_back(lhs->getType());
@@ -1133,8 +1133,8 @@ void impl::printOneResultOp(Operation *op, OpAsmPrinter &p) {
// CastOp implementation
//===----------------------------------------------------------------------===//
-void impl::buildCastOp(Builder *builder, OperationState &result, Value *source,
- Type destType) {
+void impl::buildCastOp(Builder *builder, OperationState &result,
+ ValuePtr source, Type destType) {
result.addOperands(source);
result.addTypes(destType);
}
@@ -1157,7 +1157,7 @@ void impl::printCastOp(Operation *op, OpAsmPrinter &p) {
<< op->getResult(0)->getType();
}
-Value *impl::foldCastOp(Operation *op) {
+ValuePtr impl::foldCastOp(Operation *op) {
// Identity cast
if (op->getOperand(0)->getType() == op->getResult(0)->getType())
return op->getOperand(0);
diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp
index 256a261acd8..333685a16fd 100644
--- a/mlir/lib/IR/OperationSupport.cpp
+++ b/mlir/lib/IR/OperationSupport.cpp
@@ -164,7 +164,7 @@ ResultRange::ResultRange(Operation *op)
//===----------------------------------------------------------------------===//
// ValueRange
-ValueRange::ValueRange(ArrayRef<Value *> values)
+ValueRange::ValueRange(ArrayRef<ValuePtr> values)
: ValueRange(values.data(), values.size()) {}
ValueRange::ValueRange(OperandRange values)
: ValueRange(values.begin().getBase(), values.size()) {}
@@ -176,18 +176,19 @@ ValueRange::OwnerT ValueRange::offset_base(const OwnerT &owner,
ptrdiff_t index) {
if (OpOperand *operand = owner.dyn_cast<OpOperand *>())
return operand + index;
- if (OpResult *result = owner.dyn_cast<OpResult *>())
+ if (OpResultPtr result = owner.dyn_cast<OpResultPtr>())
return result + index;
- return owner.get<Value *const *>() + index;
+ return owner.get<ValuePtr const *>() + index;
}
/// See `detail::indexed_accessor_range_base` for details.
-Value *ValueRange::dereference_iterator(const OwnerT &owner, ptrdiff_t index) {
+ValuePtr ValueRange::dereference_iterator(const OwnerT &owner,
+ ptrdiff_t index) {
// Operands access the held value via 'get'.
if (OpOperand *operand = owner.dyn_cast<OpOperand *>())
return operand[index].get();
// An OpResult is a value, so we can return it directly.
- if (OpResult *result = owner.dyn_cast<OpResult *>())
+ if (OpResultPtr result = owner.dyn_cast<OpResultPtr>())
return &result[index];
// Otherwise, this is a raw value array so just index directly.
- return owner.get<Value *const *>()[index];
+ return owner.get<ValuePtr const *>()[index];
}
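The two ValueRange accessors above dispatch over three owner kinds: an operand array, a result array, and a plain array of values; the change renames only the last two cases to the pointer aliases. A hedged sketch of the owner type those branches imply, assuming it remains an llvm::PointerUnion as the dyn_cast/get calls suggest:

    // Hypothetical owner union consistent with the three cases handled in
    // offset_base() and dereference_iterator() above (sketch only).
    using OwnerT = llvm::PointerUnion<ValuePtr const *, OpOperand *, OpResultPtr>;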
diff --git a/mlir/lib/IR/Region.cpp b/mlir/lib/IR/Region.cpp
index 6cec021b6a1..26f14c43424 100644
--- a/mlir/lib/IR/Region.cpp
+++ b/mlir/lib/IR/Region.cpp
@@ -91,7 +91,7 @@ void Region::cloneInto(Region *dest, Region::iterator destPos,
// Clone the block arguments. The user might be deleting arguments to the
// block by specifying them in the mapper. If so, we don't add the
// argument to the cloned block.
- for (auto *arg : block.getArguments())
+ for (auto arg : block.getArguments())
if (!mapper.contains(arg))
mapper.map(arg, newBlock->addArgument(arg->getType()));
@@ -106,7 +106,7 @@ void Region::cloneInto(Region *dest, Region::iterator destPos,
// operands of each of the operations.
auto remapOperands = [&](Operation *op) {
for (auto &operand : op->getOpOperands())
- if (auto *mappedOp = mapper.lookupOrNull(operand.get()))
+ if (auto mappedOp = mapper.lookupOrNull(operand.get()))
operand.set(mappedOp);
for (auto &succOp : op->getBlockOperands())
if (auto *mappedOp = mapper.lookupOrNull(succOp.get()))
@@ -143,7 +143,7 @@ static bool isIsolatedAbove(Region &region, Region &limit,
while (!pendingRegions.empty()) {
for (Block &block : *pendingRegions.pop_back_val()) {
for (Operation &op : block) {
- for (Value *operand : op.getOperands()) {
+ for (ValuePtr operand : op.getOperands()) {
// operand should be non-null here if the IR is well-formed. But
// we don't assert here as this function is called from the verifier
// and so could be called on invalid IR.
diff --git a/mlir/lib/IR/TypeUtilities.cpp b/mlir/lib/IR/TypeUtilities.cpp
index 54b1bf6329b..8200e3a3bc6 100644
--- a/mlir/lib/IR/TypeUtilities.cpp
+++ b/mlir/lib/IR/TypeUtilities.cpp
@@ -33,11 +33,11 @@ Type mlir::getElementTypeOrSelf(Type type) {
return type;
}
-Type mlir::getElementTypeOrSelf(Value *val) {
+Type mlir::getElementTypeOrSelf(ValuePtr val) {
return getElementTypeOrSelf(val->getType());
}
-Type mlir::getElementTypeOrSelf(Value &val) {
+Type mlir::getElementTypeOrSelf(ValueRef val) {
return getElementTypeOrSelf(val.getType());
}
@@ -101,18 +101,18 @@ LogicalResult mlir::verifyCompatibleShape(Type type1, Type type2) {
OperandElementTypeIterator::OperandElementTypeIterator(
Operation::operand_iterator it)
- : llvm::mapped_iterator<Operation::operand_iterator, Type (*)(Value *)>(
+ : llvm::mapped_iterator<Operation::operand_iterator, Type (*)(ValuePtr)>(
it, &unwrap) {}
-Type OperandElementTypeIterator::unwrap(Value *value) {
+Type OperandElementTypeIterator::unwrap(ValuePtr value) {
return value->getType().cast<ShapedType>().getElementType();
}
ResultElementTypeIterator::ResultElementTypeIterator(
Operation::result_iterator it)
- : llvm::mapped_iterator<Operation::result_iterator, Type (*)(Value *)>(
+ : llvm::mapped_iterator<Operation::result_iterator, Type (*)(ValuePtr)>(
it, &unwrap) {}
-Type ResultElementTypeIterator::unwrap(Value *value) {
+Type ResultElementTypeIterator::unwrap(ValuePtr value) {
return value->getType().cast<ShapedType>().getElementType();
}
diff --git a/mlir/lib/IR/Value.cpp b/mlir/lib/IR/Value.cpp
index 4c2ea5ac69c..660d8ae3248 100644
--- a/mlir/lib/IR/Value.cpp
+++ b/mlir/lib/IR/Value.cpp
@@ -23,7 +23,7 @@ using namespace mlir;
/// If this value is the result of an Operation, return the operation that
/// defines it.
Operation *Value::getDefiningOp() {
- if (auto *result = dyn_cast<OpResult>(this))
+ if (auto *result = dyn_cast<OpResult>())
return result->getOwner();
return nullptr;
}
@@ -38,7 +38,7 @@ Location Value::getLoc() {
Region *Value::getParentRegion() {
if (auto *op = getDefiningOp())
return op->getParentRegion();
- return cast<BlockArgument>(this)->getOwner()->getParent();
+ return cast<BlockArgument>()->getOwner()->getParent();
}
//===----------------------------------------------------------------------===//
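Here, and in Value::print within the AsmPrinter.cpp hunk earlier, casts inside Value's own member functions drop the explicit *this/this argument and become member calls: isa<BlockArgument>(), dyn_cast<OpResult>(), cast<BlockArgument>(). This anticipates Value carrying its own cast helpers once it is value-typed. A hypothetical sketch of such helpers, assumed rather than taken from this patch, forwarding to the existing LLVM-style free functions:

    // Hypothetical member-cast helpers on Value (sketch only, assuming they
    // forward to the llvm::isa/dyn_cast/cast free functions on 'this').
    template <typename U> bool isa() { return llvm::isa<U>(this); }
    template <typename U> U *dyn_cast() { return llvm::dyn_cast<U>(this); }
    template <typename U> U *cast() { return llvm::cast<U>(this); }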
diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp
index 498a64d70c2..f78704842fe 100644
--- a/mlir/lib/Parser/Parser.cpp
+++ b/mlir/lib/Parser/Parser.cpp
@@ -3093,7 +3093,7 @@ public:
ParseResult popSSANameScope();
/// Register a definition of a value with the symbol table.
- ParseResult addDefinition(SSAUseInfo useInfo, Value *value);
+ ParseResult addDefinition(SSAUseInfo useInfo, ValuePtr value);
/// Parse an optional list of SSA uses into 'results'.
ParseResult parseOptionalSSAUseList(SmallVectorImpl<SSAUseInfo> &results);
@@ -3103,12 +3103,13 @@ public:
/// Given a reference to an SSA value and its type, return a reference. This
/// returns null on failure.
- Value *resolveSSAUse(SSAUseInfo useInfo, Type type);
+ ValuePtr resolveSSAUse(SSAUseInfo useInfo, Type type);
ParseResult parseSSADefOrUseAndType(
const std::function<ParseResult(SSAUseInfo, Type)> &action);
- ParseResult parseOptionalSSAUseAndTypeList(SmallVectorImpl<Value *> &results);
+ ParseResult
+ parseOptionalSSAUseAndTypeList(SmallVectorImpl<ValuePtr> &results);
/// Return the location of the value identified by its name and number if it
/// has been already reference.
@@ -3130,12 +3131,12 @@ public:
/// Parse a single operation successor and its operand list.
ParseResult parseSuccessorAndUseList(Block *&dest,
- SmallVectorImpl<Value *> &operands);
+ SmallVectorImpl<ValuePtr> &operands);
/// Parse a comma-separated list of operation successors in brackets.
ParseResult
parseSuccessors(SmallVectorImpl<Block *> &destinations,
- SmallVectorImpl<SmallVector<Value *, 4>> &operands);
+ SmallVectorImpl<SmallVector<ValuePtr, 4>> &operands);
/// Parse an operation instance that is in the generic form.
Operation *parseGenericOperation();
@@ -3174,7 +3175,7 @@ public:
/// Parse a (possibly empty) list of block arguments.
ParseResult
- parseOptionalBlockArgList(SmallVectorImpl<BlockArgument *> &results,
+ parseOptionalBlockArgList(SmallVectorImpl<BlockArgumentPtr> &results,
Block *owner);
/// Get the block with the specified name, creating it if it doesn't
@@ -3204,14 +3205,14 @@ private:
void recordDefinition(StringRef def);
/// Get the value entry for the given SSA name.
- SmallVectorImpl<std::pair<Value *, SMLoc>> &getSSAValueEntry(StringRef name);
+ SmallVectorImpl<std::pair<ValuePtr, SMLoc>> &getSSAValueEntry(StringRef name);
/// Create a forward reference placeholder value with the given location and
/// result type.
- Value *createForwardRefPlaceholder(SMLoc loc, Type type);
+ ValuePtr createForwardRefPlaceholder(SMLoc loc, Type type);
/// Return true if this is a forward reference.
- bool isForwardRefPlaceholder(Value *value) {
+ bool isForwardRefPlaceholder(ValuePtr value) {
return forwardRefPlaceholders.count(value);
}
@@ -3236,7 +3237,7 @@ private:
/// This keeps track of all of the SSA values we are tracking for each name
/// scope, indexed by their name. This has one entry per result number.
- llvm::StringMap<SmallVector<std::pair<Value *, SMLoc>, 1>> values;
+ llvm::StringMap<SmallVector<std::pair<ValuePtr, SMLoc>, 1>> values;
/// This keeps track of all of the values defined by a specific name scope.
SmallVector<llvm::StringSet<>, 2> definitionsPerScope;
@@ -3253,7 +3254,7 @@ private:
/// These are all of the placeholders we've made along with the location of
/// their first reference, to allow checking for use of undefined values.
- DenseMap<Value *, SMLoc> forwardRefPlaceholders;
+ DenseMap<ValuePtr, SMLoc> forwardRefPlaceholders;
/// The builder used when creating parsed operation instances.
OpBuilder opBuilder;
@@ -3278,7 +3279,7 @@ ParseResult OperationParser::finalize() {
// Check for any forward references that are left. If we find any, error
// out.
if (!forwardRefPlaceholders.empty()) {
- SmallVector<std::pair<const char *, Value *>, 4> errors;
+ SmallVector<std::pair<const char *, ValuePtr>, 4> errors;
// Iteration over the map isn't deterministic, so sort by source location.
for (auto entry : forwardRefPlaceholders)
errors.push_back({entry.second.getPointer(), entry.first});
@@ -3342,7 +3343,7 @@ ParseResult OperationParser::popSSANameScope() {
}
/// Register a definition of a value with the symbol table.
-ParseResult OperationParser::addDefinition(SSAUseInfo useInfo, Value *value) {
+ParseResult OperationParser::addDefinition(SSAUseInfo useInfo, ValuePtr value) {
auto &entries = getSSAValueEntry(useInfo.name);
// Make sure there is a slot for this value.
@@ -3351,7 +3352,7 @@ ParseResult OperationParser::addDefinition(SSAUseInfo useInfo, Value *value) {
// If we already have an entry for this, check to see if it was a definition
// or a forward reference.
- if (auto *existing = entries[useInfo.number].first) {
+ if (auto existing = entries[useInfo.number].first) {
if (!isForwardRefPlaceholder(existing)) {
return emitError(useInfo.loc)
.append("redefinition of SSA value '", useInfo.name, "'")
@@ -3416,12 +3417,12 @@ ParseResult OperationParser::parseSSAUse(SSAUseInfo &result) {
/// Given an unbound reference to an SSA value and its type, return the value
/// it specifies. This returns null on failure.
-Value *OperationParser::resolveSSAUse(SSAUseInfo useInfo, Type type) {
+ValuePtr OperationParser::resolveSSAUse(SSAUseInfo useInfo, Type type) {
auto &entries = getSSAValueEntry(useInfo.name);
// If we have already seen a value of this name, return it.
if (useInfo.number < entries.size() && entries[useInfo.number].first) {
- auto *result = entries[useInfo.number].first;
+ auto result = entries[useInfo.number].first;
// Check that the type matches the other uses.
if (result->getType() == type)
return result;
@@ -3447,7 +3448,7 @@ Value *OperationParser::resolveSSAUse(SSAUseInfo useInfo, Type type) {
// Otherwise, this is a forward reference. Create a placeholder and remember
// that we did so.
- auto *result = createForwardRefPlaceholder(useInfo.loc, type);
+ auto result = createForwardRefPlaceholder(useInfo.loc, type);
entries[useInfo.number].first = result;
entries[useInfo.number].second = useInfo.loc;
return result;
@@ -3477,7 +3478,7 @@ ParseResult OperationParser::parseSSADefOrUseAndType(
/// ::= ssa-use-list ':' type-list-no-parens
///
ParseResult OperationParser::parseOptionalSSAUseAndTypeList(
- SmallVectorImpl<Value *> &results) {
+ SmallVectorImpl<ValuePtr> &results) {
SmallVector<SSAUseInfo, 4> valueIDs;
if (parseOptionalSSAUseList(valueIDs))
return failure();
@@ -3497,7 +3498,7 @@ ParseResult OperationParser::parseOptionalSSAUseAndTypeList(
results.reserve(valueIDs.size());
for (unsigned i = 0, e = valueIDs.size(); i != e; ++i) {
- if (auto *value = resolveSSAUse(valueIDs[i], types[i]))
+ if (auto value = resolveSSAUse(valueIDs[i], types[i]))
results.push_back(value);
else
return failure();
@@ -3512,13 +3513,13 @@ void OperationParser::recordDefinition(StringRef def) {
}
/// Get the value entry for the given SSA name.
-SmallVectorImpl<std::pair<Value *, SMLoc>> &
+SmallVectorImpl<std::pair<ValuePtr, SMLoc>> &
OperationParser::getSSAValueEntry(StringRef name) {
return isolatedNameScopes.back().values[name];
}
/// Create and remember a new placeholder for a forward reference.
-Value *OperationParser::createForwardRefPlaceholder(SMLoc loc, Type type) {
+ValuePtr OperationParser::createForwardRefPlaceholder(SMLoc loc, Type type) {
// Forward references are always created as operations, because we just need
// something with a def/use chain.
//
@@ -3632,7 +3633,7 @@ ParseResult OperationParser::parseOperation() {
///
ParseResult
OperationParser::parseSuccessorAndUseList(Block *&dest,
- SmallVectorImpl<Value *> &operands) {
+ SmallVectorImpl<ValuePtr> &operands) {
// Verify branch is identifier and get the matching block.
if (!getToken().is(Token::caret_identifier))
return emitError("expected block name");
@@ -3655,13 +3656,13 @@ OperationParser::parseSuccessorAndUseList(Block *&dest,
///
ParseResult OperationParser::parseSuccessors(
SmallVectorImpl<Block *> &destinations,
- SmallVectorImpl<SmallVector<Value *, 4>> &operands) {
+ SmallVectorImpl<SmallVector<ValuePtr, 4>> &operands) {
if (parseToken(Token::l_square, "expected '['"))
return failure();
auto parseElt = [this, &destinations, &operands]() {
Block *dest;
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
auto res = parseSuccessorAndUseList(dest, destOperands);
destinations.push_back(dest);
operands.push_back(destOperands);
@@ -3718,7 +3719,7 @@ Operation *OperationParser::parseGenericOperation() {
// Parse the successor list but don't add successors to the result yet to
// avoid messing up with the argument order.
SmallVector<Block *, 2> successors;
- SmallVector<SmallVector<Value *, 4>, 2> successorOperands;
+ SmallVector<SmallVector<ValuePtr, 4>, 2> successorOperands;
if (getToken().is(Token::l_square)) {
// Check if the operation is a known terminator.
const AbstractOperation *abstractOp = result.name.getAbstractOperation();
@@ -3779,7 +3780,7 @@ Operation *OperationParser::parseGenericOperation() {
// Add the successors, and their operands after the proper operands.
for (const auto &succ : llvm::zip(successors, successorOperands)) {
Block *successor = std::get<0>(succ);
- const SmallVector<Value *, 4> &operands = std::get<1>(succ);
+ const SmallVector<ValuePtr, 4> &operands = std::get<1>(succ);
result.addSuccessor(successor, operands);
}
@@ -4129,10 +4130,10 @@ public:
/// Resolve an operand to an SSA value, emitting an error on failure.
ParseResult resolveOperand(const OperandType &operand, Type type,
- SmallVectorImpl<Value *> &result) override {
+ SmallVectorImpl<ValuePtr> &result) override {
OperationParser::SSAUseInfo operandInfo = {operand.name, operand.number,
operand.location};
- if (auto *value = parser.resolveSSAUse(operandInfo, type)) {
+ if (auto value = parser.resolveSSAUse(operandInfo, type)) {
result.push_back(value);
return success();
}
@@ -4242,7 +4243,7 @@ public:
/// Parse a single operation successor and its operand list.
ParseResult
parseSuccessorAndUseList(Block *&dest,
- SmallVectorImpl<Value *> &operands) override {
+ SmallVectorImpl<ValuePtr> &operands) override {
return parser.parseSuccessorAndUseList(dest, operands);
}
@@ -4470,7 +4471,7 @@ ParseResult OperationParser::parseBlock(Block *&block) {
// If an argument list is present, parse it.
if (consumeIf(Token::l_paren)) {
- SmallVector<BlockArgument *, 8> bbArgs;
+ SmallVector<BlockArgumentPtr, 8> bbArgs;
if (parseOptionalBlockArgList(bbArgs, block) ||
parseToken(Token::r_paren, "expected ')' to end argument list"))
return failure();
@@ -4534,7 +4535,7 @@ Block *OperationParser::defineBlockNamed(StringRef name, SMLoc loc,
/// ssa-id-and-type-list ::= ssa-id-and-type (`,` ssa-id-and-type)*
///
ParseResult OperationParser::parseOptionalBlockArgList(
- SmallVectorImpl<BlockArgument *> &results, Block *owner) {
+ SmallVectorImpl<BlockArgumentPtr> &results, Block *owner) {
if (getToken().is(Token::r_brace))
return success();
@@ -4555,7 +4556,7 @@ ParseResult OperationParser::parseOptionalBlockArgList(
return emitError("too many arguments specified in argument list");
// Finally, make sure the existing argument has the correct type.
- auto *arg = owner->getArgument(nextArgument++);
+ auto arg = owner->getArgument(nextArgument++);
if (arg->getType() != type)
return emitError("argument and block argument type mismatch");
return addDefinition(useInfo, arg);
diff --git a/mlir/lib/Pass/IRPrinting.cpp b/mlir/lib/Pass/IRPrinting.cpp
index 8e172156f05..9d1c1f0d391 100644
--- a/mlir/lib/Pass/IRPrinting.cpp
+++ b/mlir/lib/Pass/IRPrinting.cpp
@@ -48,14 +48,14 @@ public:
for (Region &region : op->getRegions()) {
for (Block &block : region) {
addDataToHash(hasher, &block);
- for (BlockArgument *arg : block.getArguments())
+ for (BlockArgumentPtr arg : block.getArguments())
addDataToHash(hasher, arg);
}
}
// - Location
addDataToHash(hasher, op->getLoc().getAsOpaquePointer());
// - Operands
- for (Value *operand : op->getOperands())
+ for (ValuePtr operand : op->getOperands())
addDataToHash(hasher, operand);
// - Successors
for (unsigned i = 0, e = op->getNumSuccessors(); i != e; ++i)
diff --git a/mlir/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp b/mlir/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp
index d38c76255f0..13fed0f9b1c 100644
--- a/mlir/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp
+++ b/mlir/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp
@@ -102,7 +102,7 @@ void CAGSlice::enumerateImpliedConnections(
std::vector<std::pair<CAGAnchorNode *, CAGAnchorNode *>> impliedPairs;
for (auto &resultAnchorPair : resultAnchors) {
CAGResultAnchor *resultAnchor = resultAnchorPair.second;
- Value *resultValue = resultAnchor->getValue();
+ ValuePtr resultValue = resultAnchor->getValue();
for (auto &use : resultValue->getUses()) {
Operation *operandOp = use.getOwner();
unsigned operandIdx = use.getOperandNumber();
diff --git a/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp b/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
index a32bb2c9b3c..a3cbe214040 100644
--- a/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
+++ b/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
@@ -74,7 +74,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext,
auto func = getFunction();
// Insert stats for each argument.
- for (auto *arg : func.getArguments()) {
+ for (auto arg : func.getArguments()) {
if (!config.isHandledType(arg->getType()))
continue;
OpBuilder b(func.getBody());
diff --git a/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp b/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
index 511df0a463f..68c263bc423 100644
--- a/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
+++ b/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
@@ -181,17 +181,17 @@ void InferQuantizedTypesPass::runWithConfig(SolverContext &solverContext,
void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
Type newType) {
- Value *inputValue = anchor->getValue();
+ ValuePtr inputValue = anchor->getValue();
Operation *op = anchor->getOp();
OpBuilder b(op->getBlock(), Block::iterator(op));
- SmallVector<Value *, 1> removeValuesIfDead;
+ SmallVector<ValuePtr, 1> removeValuesIfDead;
// Because we've already run the result transforms at this phase, it is
// very likely that inputValue points to a dcast op whose input matches
// our type. We detect that situation and route around just to save some
// bulk in the IR.
- Value *newTypedInputValue = inputValue;
+ ValuePtr newTypedInputValue = inputValue;
auto inputDcastOp =
dyn_cast_or_null<DequantizeCastOp>(inputValue->getDefiningOp());
if (inputDcastOp && inputDcastOp.arg()->getType() == newType) {
@@ -228,7 +228,7 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
break;
}
- for (Value *removeValueIfDead : removeValuesIfDead) {
+ for (ValuePtr removeValueIfDead : removeValuesIfDead) {
if (removeValueIfDead->use_empty()) {
removeValueIfDead->getDefiningOp()->erase();
}
@@ -237,12 +237,12 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
void InferQuantizedTypesPass::transformResultType(CAGResultAnchor *anchor,
Type newType) {
- Value *origResultValue = anchor->getValue();
+ ValuePtr origResultValue = anchor->getValue();
Operation *op = origResultValue->getDefiningOp();
OpBuilder b(op->getBlock(), ++Block::iterator(op));
- Value *replacedResultValue = nullptr;
- Value *newResultValue = nullptr;
+ ValuePtr replacedResultValue = nullptr;
+ ValuePtr newResultValue = nullptr;
switch (anchor->getTypeTransformRule()) {
case CAGAnchorNode::TypeTransformRule::Direct:
origResultValue->setType(newType);
diff --git a/mlir/lib/TableGen/Pattern.cpp b/mlir/lib/TableGen/Pattern.cpp
index 098dba3ae6e..e8f44087b85 100644
--- a/mlir/lib/TableGen/Pattern.cpp
+++ b/mlir/lib/TableGen/Pattern.cpp
@@ -224,7 +224,7 @@ tblgen::SymbolInfoMap::SymbolInfo::getVarDecl(StringRef name) const {
return formatv("Operation::operand_range {0}(op0->getOperands());\n", name);
}
case Kind::Value: {
- return formatv("ArrayRef<Value *> {0};\n", name);
+ return formatv("ArrayRef<ValuePtr> {0};\n", name);
}
case Kind::Result: {
// Use the op itself for captured results.
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index 6cf975bcce2..7273d3dfd7b 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -76,7 +76,7 @@ private:
/// `value` is an SSA-use. Return the remapped version of `value` or a
/// placeholder that will be remapped later if this is an instruction that
/// has not yet been visited.
- Value *processValue(llvm::Value *value);
+ ValuePtr processValue(llvm::Value *value);
/// Create the most accurate Location possible using a llvm::DebugLoc and
/// possibly an llvm::Instruction to narrow the Location if debug information
/// is unavailable.
@@ -85,14 +85,14 @@ private:
/// `br` branches to `target`. Return the block arguments to attach to the
/// generated branch op. These should be in the same order as the PHIs in
/// `target`.
- SmallVector<Value *, 4> processBranchArgs(llvm::BranchInst *br,
- llvm::BasicBlock *target);
+ SmallVector<ValuePtr, 4> processBranchArgs(llvm::BranchInst *br,
+ llvm::BasicBlock *target);
/// Return `value` as an attribute to attach to a GlobalOp.
Attribute getConstantAsAttr(llvm::Constant *value);
/// Return `c` as an MLIR Value. This could either be a ConstantOp, or
/// an expanded sequence of ops in the current function's entry block (for
/// ConstantExprs or ConstantGEPs).
- Value *processConstant(llvm::Constant *c);
+ ValuePtr processConstant(llvm::Constant *c);
/// The current builder, pointing at where the next Instruction should be
/// generated.
@@ -120,7 +120,7 @@ private:
/// Remapped blocks, for the current function.
DenseMap<llvm::BasicBlock *, Block *> blocks;
/// Remapped values. These are function-local.
- DenseMap<llvm::Value *, Value *> instMap;
+ DenseMap<llvm::Value *, ValuePtr> instMap;
/// Instructions that had not been defined when first encountered as a use.
/// Maps to the dummy Operation that was created in processValue().
DenseMap<llvm::Value *, Operation *> unknownInstMap;
@@ -263,13 +263,13 @@ GlobalOp Importer::processGlobal(llvm::GlobalVariable *GV) {
Region &r = op.getInitializerRegion();
currentEntryBlock = b.createBlock(&r);
b.setInsertionPoint(currentEntryBlock, currentEntryBlock->begin());
- Value *v = processConstant(GV->getInitializer());
- b.create<ReturnOp>(op.getLoc(), ArrayRef<Value *>({v}));
+ ValuePtr v = processConstant(GV->getInitializer());
+ b.create<ReturnOp>(op.getLoc(), ArrayRef<ValuePtr>({v}));
}
return globals[GV] = op;
}
-Value *Importer::processConstant(llvm::Constant *c) {
+ValuePtr Importer::processConstant(llvm::Constant *c) {
if (Attribute attr = getConstantAsAttr(c)) {
// These constants can be represented as attributes.
OpBuilder b(currentEntryBlock, currentEntryBlock->begin());
@@ -298,7 +298,7 @@ Value *Importer::processConstant(llvm::Constant *c) {
return nullptr;
}
-Value *Importer::processValue(llvm::Value *value) {
+ValuePtr Importer::processValue(llvm::Value *value) {
auto it = instMap.find(value);
if (it != instMap.end())
return it->second;
@@ -407,9 +407,9 @@ static ICmpPredicate getICmpPredicate(llvm::CmpInst::Predicate p) {
// `br` branches to `target`. Return the branch arguments to `br`, in the
// same order of the PHIs in `target`.
-SmallVector<Value *, 4> Importer::processBranchArgs(llvm::BranchInst *br,
- llvm::BasicBlock *target) {
- SmallVector<Value *, 4> v;
+SmallVector<ValuePtr, 4> Importer::processBranchArgs(llvm::BranchInst *br,
+ llvm::BasicBlock *target) {
+ SmallVector<ValuePtr, 4> v;
for (auto inst = target->begin(); isa<llvm::PHINode>(inst); ++inst) {
auto *PN = cast<llvm::PHINode>(&*inst);
v.push_back(processValue(PN->getIncomingValueForBlock(br->getParent())));
@@ -421,7 +421,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
// FIXME: Support uses of SubtargetData. Currently inbounds GEPs, fast-math
// flags and call / operand attributes are not supported.
Location loc = processDebugLoc(inst->getDebugLoc(), inst);
- Value *&v = instMap[inst];
+ ValuePtr &v = instMap[inst];
assert(!v && "processInstruction must be called only once per instruction!");
switch (inst->getOpcode()) {
default:
@@ -462,7 +462,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
case llvm::Instruction::AddrSpaceCast:
case llvm::Instruction::BitCast: {
OperationState state(loc, opcMap.lookup(inst->getOpcode()));
- SmallVector<Value *, 4> ops;
+ SmallVector<ValuePtr, 4> ops;
ops.reserve(inst->getNumOperands());
for (auto *op : inst->operand_values())
ops.push_back(processValue(op));
@@ -484,7 +484,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
auto *brInst = cast<llvm::BranchInst>(inst);
OperationState state(loc,
brInst->isConditional() ? "llvm.cond_br" : "llvm.br");
- SmallVector<Value *, 4> ops;
+ SmallVector<ValuePtr, 4> ops;
if (brInst->isConditional())
ops.push_back(processValue(brInst->getCondition()));
state.addOperands(ops);
@@ -500,7 +500,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
}
case llvm::Instruction::Call: {
llvm::CallInst *ci = cast<llvm::CallInst>(inst);
- SmallVector<Value *, 4> ops;
+ SmallVector<ValuePtr, 4> ops;
ops.reserve(inst->getNumOperands());
for (auto &op : ci->arg_operands())
ops.push_back(processValue(op.get()));
@@ -523,7 +523,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
case llvm::Instruction::GetElementPtr: {
// FIXME: Support inbounds GEPs.
llvm::GetElementPtrInst *gep = cast<llvm::GetElementPtrInst>(inst);
- SmallVector<Value *, 4> ops;
+ SmallVector<ValuePtr, 4> ops;
for (auto *op : gep->operand_values())
ops.push_back(processValue(op));
v = b.create<GEPOp>(loc, processType(inst->getType()), ops,
@@ -565,8 +565,8 @@ LogicalResult Importer::processFunction(llvm::Function *f) {
// any unknown uses we encountered are remapped.
for (auto &llvmAndUnknown : unknownInstMap) {
assert(instMap.count(llvmAndUnknown.first));
- Value *newValue = instMap[llvmAndUnknown.first];
- Value *oldValue = llvmAndUnknown.second->getResult(0);
+ ValuePtr newValue = instMap[llvmAndUnknown.first];
+ ValuePtr oldValue = llvmAndUnknown.second->getResult(0);
oldValue->replaceAllUsesWith(newValue);
llvmAndUnknown.second->erase();
}
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index e59c69aa25b..ec28434b823 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -248,7 +248,7 @@ LogicalResult ModuleTranslation::convertBlock(Block &bb, bool ignoreArguments) {
auto predecessors = bb.getPredecessors();
unsigned numPredecessors =
std::distance(predecessors.begin(), predecessors.end());
- for (auto *arg : bb.getArguments()) {
+ for (auto arg : bb.getArguments()) {
auto wrappedType = arg->getType().dyn_cast<LLVM::LLVMType>();
if (!wrappedType)
return emitError(bb.front().getLoc(),
@@ -342,8 +342,8 @@ void ModuleTranslation::convertGlobals() {
/// Get the SSA value passed to the current block from the terminator operation
/// of its predecessor.
-static Value *getPHISourceValue(Block *current, Block *pred,
- unsigned numArguments, unsigned index) {
+static ValuePtr getPHISourceValue(Block *current, Block *pred,
+ unsigned numArguments, unsigned index) {
auto &terminator = *pred->getTerminator();
if (isa<LLVM::BrOp>(terminator)) {
return terminator.getOperand(index);
@@ -420,7 +420,7 @@ LogicalResult ModuleTranslation::convertOneFunction(LLVMFuncOp func) {
unsigned int argIdx = 0;
for (const auto &kvp : llvm::zip(func.getArguments(), llvmFunc->args())) {
llvm::Argument &llvmArg = std::get<1>(kvp);
- BlockArgument *mlirArg = std::get<0>(kvp);
+ BlockArgumentPtr mlirArg = std::get<0>(kvp);
if (auto attr = func.getArgAttrOfType<BoolAttr>(argIdx, "llvm.noalias")) {
// NB: Attribute already verified to be boolean, so check if we can indeed
@@ -497,7 +497,7 @@ SmallVector<llvm::Value *, 8>
ModuleTranslation::lookupValues(ValueRange values) {
SmallVector<llvm::Value *, 8> remapped;
remapped.reserve(values.size());
- for (Value *v : values)
+ for (ValuePtr v : values)
remapped.push_back(valueMapping.lookup(v));
return remapped;
}
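Note that lookupValues above keeps llvm::Value * for its results; only the MLIR-side keys are respelled. A stand-alone sketch of that shape, with std containers standing in for SmallVector and the DenseMap-backed valueMapping (toy types and containers, not the real API):

    #include <unordered_map>
    #include <vector>

    struct MlirValue {};                 // stand-in for the MLIR value
    struct LlvmValue {};                 // stand-in for llvm::Value
    using ValuePtr = MlirValue *;        // assumed transitional alias

    // Remap MLIR values to their LLVM counterparts through a side table; a
    // missing entry maps to nullptr, mirroring a lookup that can miss.
    std::vector<LlvmValue *>
    lookupValues(const std::vector<ValuePtr> &values,
                 const std::unordered_map<ValuePtr, LlvmValue *> &valueMapping) {
      std::vector<LlvmValue *> remapped;
      remapped.reserve(values.size());
      for (ValuePtr v : values) {
        auto it = valueMapping.find(v);
        remapped.push_back(it == valueMapping.end() ? nullptr : it->second);
      }
      return remapped;
    }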
diff --git a/mlir/lib/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Transforms/AffineDataCopyGeneration.cpp
index 7fb356f3ad2..5bc33943e50 100644
--- a/mlir/lib/Transforms/AffineDataCopyGeneration.cpp
+++ b/mlir/lib/Transforms/AffineDataCopyGeneration.cpp
@@ -130,7 +130,7 @@ struct AffineDataCopyGeneration
bool skipNonUnitStrideLoops;
// Constant zero index to avoid too many duplicates.
- Value *zeroIndex = nullptr;
+ ValuePtr zeroIndex = nullptr;
};
} // end anonymous namespace
diff --git a/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp
index f384f6d3fb1..23199dd8a39 100644
--- a/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp
@@ -58,15 +58,15 @@ struct LoopInvariantCodeMotion : public FunctionPass<LoopInvariantCodeMotion> {
} // end anonymous namespace
static bool
-checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
+checkInvarianceOfNestedIfOps(Operation *op, ValuePtr indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist);
-static bool isOpLoopInvariant(Operation &op, Value *indVar,
+static bool isOpLoopInvariant(Operation &op, ValuePtr indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist);
static bool
-areAllOpsInTheBlockListInvariant(Region &blockList, Value *indVar,
+areAllOpsInTheBlockListInvariant(Region &blockList, ValuePtr indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist);
@@ -79,7 +79,7 @@ static bool isMemRefDereferencingOp(Operation &op) {
}
// Returns true if the individual op is loop invariant.
-bool isOpLoopInvariant(Operation &op, Value *indVar,
+bool isOpLoopInvariant(Operation &op, ValuePtr indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist) {
LLVM_DEBUG(llvm::dbgs() << "iterating on op: " << op;);
@@ -97,9 +97,9 @@ bool isOpLoopInvariant(Operation &op, Value *indVar,
return false;
} else if (!isa<ConstantOp>(op)) {
if (isMemRefDereferencingOp(op)) {
- Value *memref = isa<AffineLoadOp>(op)
- ? cast<AffineLoadOp>(op).getMemRef()
- : cast<AffineStoreOp>(op).getMemRef();
+ ValuePtr memref = isa<AffineLoadOp>(op)
+ ? cast<AffineLoadOp>(op).getMemRef()
+ : cast<AffineStoreOp>(op).getMemRef();
for (auto *user : memref->getUsers()) {
// If this memref has a user that is a DMA, give up because these
// operations write to this memref.
@@ -163,7 +163,8 @@ bool isOpLoopInvariant(Operation &op, Value *indVar,
// Checks if all ops in a region (i.e. list of blocks) are loop invariant.
bool areAllOpsInTheBlockListInvariant(
- Region &blockList, Value *indVar, SmallPtrSetImpl<Operation *> &definedOps,
+ Region &blockList, ValuePtr indVar,
+ SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist) {
for (auto &b : blockList) {
@@ -178,7 +179,7 @@ bool areAllOpsInTheBlockListInvariant(
}
// Returns true if the affine.if op can be hoisted.
-bool checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
+bool checkInvarianceOfNestedIfOps(Operation *op, ValuePtr indVar,
SmallPtrSetImpl<Operation *> &definedOps,
SmallPtrSetImpl<Operation *> &opsToHoist) {
assert(isa<AffineIfOp>(op));
@@ -199,7 +200,7 @@ bool checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
auto *loopBody = forOp.getBody();
- auto *indVar = forOp.getInductionVar();
+ auto indVar = forOp.getInductionVar();
SmallPtrSet<Operation *, 8> definedOps;
// This is the place where hoisted instructions would reside.
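A recurring detail in this file and the ones below: bindings such as the induction variable and op results drop the star, so 'auto *indVar' becomes 'auto indVar'. Assuming the longer-term plan is for the value handle to stop being a raw pointer, plain auto keeps these call sites valid either way; a tiny self-contained illustration (toy types, not MLIR):

    struct Value {};
    using ValuePtr = Value *;

    static Value gV;
    ValuePtr getInductionVar() { return &gV; }  // today the handle is still a pointer

    int main() {
      auto iv = getInductionVar();     // compiles whether the handle is a pointer or a value type
      // auto *iv = getInductionVar(); // would only compile while the handle is a raw pointer
      return iv == &gV ? 0 : 1;
    }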
diff --git a/mlir/lib/Transforms/DialectConversion.cpp b/mlir/lib/Transforms/DialectConversion.cpp
index 37c918fe9be..05066ef599c 100644
--- a/mlir/lib/Transforms/DialectConversion.cpp
+++ b/mlir/lib/Transforms/DialectConversion.cpp
@@ -86,13 +86,13 @@ namespace {
struct ConversionValueMapping {
/// Lookup a mapped value within the map. If a mapping for the provided value
/// does not exist then return the provided value.
- Value *lookupOrDefault(Value *from) const;
+ ValuePtr lookupOrDefault(ValuePtr from) const;
/// Map a value to the one provided.
- void map(Value *oldVal, Value *newVal) { mapping.map(oldVal, newVal); }
+ void map(ValuePtr oldVal, ValuePtr newVal) { mapping.map(oldVal, newVal); }
/// Drop the last mapping for the given value.
- void erase(Value *value) { mapping.erase(value); }
+ void erase(ValuePtr value) { mapping.erase(value); }
private:
/// Current value mappings.
@@ -102,10 +102,10 @@ private:
/// Lookup a mapped value within the map. If a mapping for the provided value
/// does not exist then return the provided value.
-Value *ConversionValueMapping::lookupOrDefault(Value *from) const {
+ValuePtr ConversionValueMapping::lookupOrDefault(ValuePtr from) const {
// If this value had a valid mapping, unmap that value as well in the case
// that it was also replaced.
- while (auto *mappedValue = mapping.lookupOrNull(from))
+ while (auto mappedValue = mapping.lookupOrNull(from))
from = mappedValue;
return from;
}
@@ -127,7 +127,7 @@ struct ArgConverter {
/// been converted.
struct ConvertedArgInfo {
ConvertedArgInfo(unsigned newArgIdx, unsigned newArgSize,
- Value *castValue = nullptr)
+ ValuePtr castValue = nullptr)
: newArgIdx(newArgIdx), newArgSize(newArgSize), castValue(castValue) {}
/// The start index in the new argument list that contains arguments that
@@ -139,7 +139,7 @@ struct ArgConverter {
/// The cast value that was created to cast from the new arguments to the
/// old. This only used if 'newArgSize' > 1.
- Value *castValue;
+ ValuePtr castValue;
};
/// This structure contains information pertaining to a block that has had its
@@ -235,7 +235,7 @@ void ArgConverter::notifyOpRemoved(Operation *op) {
// Drop all uses of the original arguments and delete the original block.
Block *origBlock = it->second.origBlock;
- for (BlockArgument *arg : origBlock->getArguments())
+ for (BlockArgumentPtr arg : origBlock->getArguments())
arg->dropAllUses();
conversionInfo.erase(it);
}
@@ -270,7 +270,7 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) {
// Process the remapping for each of the original arguments.
for (unsigned i = 0, e = origBlock->getNumArguments(); i != e; ++i) {
Optional<ConvertedArgInfo> &argInfo = blockInfo.argInfo[i];
- BlockArgument *origArg = origBlock->getArgument(i);
+ BlockArgumentPtr origArg = origBlock->getArgument(i);
// Handle the case of a 1->0 value mapping.
if (!argInfo) {
@@ -305,7 +305,7 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) {
}
// Otherwise this is a 1->N value mapping.
- Value *castValue = argInfo->castValue;
+ ValuePtr castValue = argInfo->castValue;
assert(argInfo->newArgSize > 1 && castValue && "expected 1->N mapping");
// If the argument is still used, replace it with the generated cast.
@@ -344,8 +344,8 @@ Block *ArgConverter::applySignatureConversion(
Block *newBlock = block->splitBlock(block->begin());
block->replaceAllUsesWith(newBlock);
- SmallVector<Value *, 4> newArgRange(newBlock->addArguments(convertedTypes));
- ArrayRef<Value *> newArgs(newArgRange);
+ SmallVector<ValuePtr, 4> newArgRange(newBlock->addArguments(convertedTypes));
+ ArrayRef<ValuePtr> newArgs(newArgRange);
// Remap each of the original arguments as determined by the signature
// conversion.
@@ -358,7 +358,7 @@ Block *ArgConverter::applySignatureConversion(
auto inputMap = signatureConversion.getInputMapping(i);
if (!inputMap)
continue;
- BlockArgument *origArg = block->getArgument(i);
+ BlockArgumentPtr origArg = block->getArgument(i);
// If inputMap->replacementValue is not nullptr, then the argument is
// dropped and a replacement value is provided to be the remappedValue.
@@ -445,7 +445,7 @@ struct ConversionPatternRewriterImpl {
: op(op), newValues(newValues.begin(), newValues.end()) {}
Operation *op;
- SmallVector<Value *, 2> newValues;
+ SmallVector<ValuePtr, 2> newValues;
};
/// The kind of the block action performed during the rewrite. Actions can be
@@ -542,7 +542,7 @@ struct ConversionPatternRewriterImpl {
/// Remap the given operands to those with potentially different types.
void remapValues(Operation::operand_range operands,
- SmallVectorImpl<Value *> &remapped);
+ SmallVectorImpl<ValuePtr> &remapped);
/// Returns true if the given operation is ignored, and does not need to be
/// converted.
@@ -591,7 +591,7 @@ void ConversionPatternRewriterImpl::resetState(RewriterState state) {
// Reset any replaced operations and undo any saved mappings.
for (auto &repl : llvm::drop_begin(replacements, state.numReplacements))
- for (auto *result : repl.op->getResults())
+ for (auto result : repl.op->getResults())
mapping.erase(result);
replacements.resize(state.numReplacements);
@@ -660,7 +660,7 @@ void ConversionPatternRewriterImpl::applyRewrites() {
// Apply all of the rewrites replacements requested during conversion.
for (auto &repl : replacements) {
for (unsigned i = 0, e = repl.newValues.size(); i != e; ++i) {
- if (auto *newValue = repl.newValues[i])
+ if (auto newValue = repl.newValues[i])
repl.op->getResult(i)->replaceAllUsesWith(
mapping.lookupOrDefault(newValue));
}
@@ -715,7 +715,7 @@ void ConversionPatternRewriterImpl::replaceOp(Operation *op,
// Create mappings for each of the new result values.
for (unsigned i = 0, e = newValues.size(); i < e; ++i)
- if (auto *repl = newValues[i])
+ if (auto repl = newValues[i])
mapping.map(op->getResult(i), repl);
// Record the requested operation replacement.
@@ -755,9 +755,9 @@ void ConversionPatternRewriterImpl::notifyRegionWasClonedBefore(
}
void ConversionPatternRewriterImpl::remapValues(
- Operation::operand_range operands, SmallVectorImpl<Value *> &remapped) {
+ Operation::operand_range operands, SmallVectorImpl<ValuePtr> &remapped) {
remapped.reserve(llvm::size(operands));
- for (Value *operand : operands)
+ for (ValuePtr operand : operands)
remapped.push_back(mapping.lookupOrDefault(operand));
}
@@ -803,7 +803,7 @@ void ConversionPatternRewriter::replaceOp(Operation *op, ValueRange newValues,
void ConversionPatternRewriter::eraseOp(Operation *op) {
LLVM_DEBUG(llvm::dbgs() << "** Erasing operation : " << op->getName()
<< "\n");
- SmallVector<Value *, 1> nullRepls(op->getNumResults(), nullptr);
+ SmallVector<ValuePtr, 1> nullRepls(op->getNumResults(), nullptr);
impl->replaceOp(op, nullRepls, /*valuesToRemoveIfDead=*/llvm::None);
}
@@ -813,8 +813,8 @@ Block *ConversionPatternRewriter::applySignatureConversion(
return impl->applySignatureConversion(region, conversion);
}
-void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument *from,
- Value *to) {
+void ConversionPatternRewriter::replaceUsesOfBlockArgument(
+ BlockArgumentPtr from, ValuePtr to) {
for (auto &u : from->getUses()) {
if (u.getOwner() == to->getDefiningOp())
continue;
@@ -825,7 +825,7 @@ void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument *from,
/// Return the converted value that replaces 'key'. Return 'key' if there is
/// no such a converted value.
-Value *ConversionPatternRewriter::getRemappedValue(Value *key) {
+ValuePtr ConversionPatternRewriter::getRemappedValue(ValuePtr key) {
return impl->mapping.lookupOrDefault(key);
}
@@ -896,7 +896,7 @@ detail::ConversionPatternRewriterImpl &ConversionPatternRewriter::getImpl() {
PatternMatchResult
ConversionPattern::matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const {
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
auto &dialectRewriter = static_cast<ConversionPatternRewriter &>(rewriter);
dialectRewriter.getImpl().remapValues(op->getOperands(), operands);
@@ -908,7 +908,7 @@ ConversionPattern::matchAndRewrite(Operation *op,
SmallVector<Block *, 2> destinations;
destinations.reserve(op->getNumSuccessors());
- SmallVector<ArrayRef<Value *>, 2> operandsPerDestination;
+ SmallVector<ArrayRef<ValuePtr>, 2> operandsPerDestination;
unsigned firstSuccessorOperand = op->getSuccessorOperandIndex(0);
for (unsigned i = 0, seen = 0, e = op->getNumSuccessors(); i < e; ++i) {
destinations.push_back(op->getSuccessor(i));
@@ -1059,7 +1059,7 @@ OperationLegalizer::legalizeWithFold(Operation *op,
RewriterState curState = rewriterImpl.getCurrentState();
// Try to fold the operation.
- SmallVector<Value *, 2> replacementValues;
+ SmallVector<ValuePtr, 2> replacementValues;
rewriter.setInsertionPoint(op);
if (failed(rewriter.tryFold(op, replacementValues)))
return failure();
@@ -1459,7 +1459,7 @@ void TypeConverter::SignatureConversion::remapInput(unsigned origInputNo,
/// Remap an input of the original signature to another `replacementValue`
/// value. This would make the signature converter drop this argument.
void TypeConverter::SignatureConversion::remapInput(unsigned origInputNo,
- Value *replacementValue) {
+ ValuePtr replacementValue) {
assert(!remappedInputs[origInputNo] && "input has already been remapped");
remappedInputs[origInputNo] =
InputMapping{origInputNo, /*size=*/0, replacementValue};
@@ -1528,7 +1528,7 @@ struct FuncOpSignatureConversion : public OpConversionPattern<FuncOp> {
/// Hook for derived classes to implement combined matching and rewriting.
PatternMatchResult
- matchAndRewrite(FuncOp funcOp, ArrayRef<Value *> operands,
+ matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
FunctionType type = funcOp.getType();
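The FuncOpSignatureConversion hunk above shows the hook that downstream conversion patterns override: matchAndRewrite now receives the remapped operands as ArrayRef<ValuePtr>. A hypothetical pattern written against that signature, assuming the alias is visible in the mlir namespace and that the era's PatternMatchResult/matchSuccess API applies (none of the names below are taken from this diff):

    #include "mlir/Transforms/DialectConversion.h"

    // Hypothetical helper: forwards the already-remapped operands of OpTy as its
    // replacement values, using the updated matchAndRewrite signature.
    template <typename OpTy>
    struct ForwardOperands : public mlir::OpConversionPattern<OpTy> {
      using mlir::OpConversionPattern<OpTy>::OpConversionPattern;

      mlir::PatternMatchResult
      matchAndRewrite(OpTy op, llvm::ArrayRef<mlir::ValuePtr> operands,
                      mlir::ConversionPatternRewriter &rewriter) const override {
        // 'operands' already holds the type-converted values for op's operands.
        rewriter.replaceOp(op, operands);
        return this->matchSuccess();
      }
    };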
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 5694c990b9b..60f0264eb35 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -172,7 +172,7 @@ public:
Node(unsigned id, Operation *op) : id(id), op(op) {}
// Returns the load op count for 'memref'.
- unsigned getLoadOpCount(Value *memref) {
+ unsigned getLoadOpCount(ValuePtr memref) {
unsigned loadOpCount = 0;
for (auto *loadOpInst : loads) {
if (memref == cast<AffineLoadOp>(loadOpInst).getMemRef())
@@ -182,7 +182,7 @@ public:
}
// Returns the store op count for 'memref'.
- unsigned getStoreOpCount(Value *memref) {
+ unsigned getStoreOpCount(ValuePtr memref) {
unsigned storeOpCount = 0;
for (auto *storeOpInst : stores) {
if (memref == cast<AffineStoreOp>(storeOpInst).getMemRef())
@@ -192,7 +192,7 @@ public:
}
// Returns all store ops in 'storeOps' which access 'memref'.
- void getStoreOpsForMemref(Value *memref,
+ void getStoreOpsForMemref(ValuePtr memref,
SmallVectorImpl<Operation *> *storeOps) {
for (auto *storeOpInst : stores) {
if (memref == cast<AffineStoreOp>(storeOpInst).getMemRef())
@@ -201,7 +201,7 @@ public:
}
// Returns all load ops in 'loadOps' which access 'memref'.
- void getLoadOpsForMemref(Value *memref,
+ void getLoadOpsForMemref(ValuePtr memref,
SmallVectorImpl<Operation *> *loadOps) {
for (auto *loadOpInst : loads) {
if (memref == cast<AffineLoadOp>(loadOpInst).getMemRef())
@@ -211,13 +211,13 @@ public:
// Returns all memrefs in 'loadAndStoreMemrefSet' for which this node
// has at least one load and store operation.
- void getLoadAndStoreMemrefSet(DenseSet<Value *> *loadAndStoreMemrefSet) {
- llvm::SmallDenseSet<Value *, 2> loadMemrefs;
+ void getLoadAndStoreMemrefSet(DenseSet<ValuePtr> *loadAndStoreMemrefSet) {
+ llvm::SmallDenseSet<ValuePtr, 2> loadMemrefs;
for (auto *loadOpInst : loads) {
loadMemrefs.insert(cast<AffineLoadOp>(loadOpInst).getMemRef());
}
for (auto *storeOpInst : stores) {
- auto *memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
+ auto memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
if (loadMemrefs.count(memref) > 0)
loadAndStoreMemrefSet->insert(memref);
}
@@ -239,7 +239,7 @@ public:
// defines an SSA value and another graph node which uses the SSA value
// (e.g. a constant operation defining a value which is used inside a loop
// nest).
- Value *value;
+ ValuePtr value;
};
// Map from node id to Node.
@@ -250,7 +250,7 @@ public:
DenseMap<unsigned, SmallVector<Edge, 2>> outEdges;
// Map from memref to a count on the dependence edges associated with that
// memref.
- DenseMap<Value *, unsigned> memrefEdgeCount;
+ DenseMap<ValuePtr, unsigned> memrefEdgeCount;
// The next unique identifier to use for newly created graph nodes.
unsigned nextNodeId = 0;
@@ -309,7 +309,7 @@ public:
bool writesToLiveInOrEscapingMemrefs(unsigned id) {
Node *node = getNode(id);
for (auto *storeOpInst : node->stores) {
- auto *memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
+ auto memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
auto *op = memref->getDefiningOp();
// Return true if 'memref' is a block argument.
if (!op)
@@ -338,7 +338,7 @@ public:
const auto &nodeOutEdges = outEdgeIt->second;
for (auto *op : node->stores) {
auto storeOp = cast<AffineStoreOp>(op);
- auto *memref = storeOp.getMemRef();
+ auto memref = storeOp.getMemRef();
// Skip this store if there are no dependences on its memref. This means
// that store either:
// *) writes to a memref that is only read within the same loop nest
@@ -381,7 +381,7 @@ public:
// Returns true iff there is an edge from node 'srcId' to node 'dstId' which
// is for 'value' if non-null, or for any value otherwise. Returns false
// otherwise.
- bool hasEdge(unsigned srcId, unsigned dstId, Value *value = nullptr) {
+ bool hasEdge(unsigned srcId, unsigned dstId, ValuePtr value = nullptr) {
if (outEdges.count(srcId) == 0 || inEdges.count(dstId) == 0) {
return false;
}
@@ -395,7 +395,7 @@ public:
}
// Adds an edge from node 'srcId' to node 'dstId' for 'value'.
- void addEdge(unsigned srcId, unsigned dstId, Value *value) {
+ void addEdge(unsigned srcId, unsigned dstId, ValuePtr value) {
if (!hasEdge(srcId, dstId, value)) {
outEdges[srcId].push_back({dstId, value});
inEdges[dstId].push_back({srcId, value});
@@ -405,7 +405,7 @@ public:
}
// Removes an edge from node 'srcId' to node 'dstId' for 'value'.
- void removeEdge(unsigned srcId, unsigned dstId, Value *value) {
+ void removeEdge(unsigned srcId, unsigned dstId, ValuePtr value) {
assert(inEdges.count(dstId) > 0);
assert(outEdges.count(srcId) > 0);
if (value->getType().isa<MemRefType>()) {
@@ -459,7 +459,7 @@ public:
// Returns the input edge count for node 'id' and 'memref' from src nodes
// which access 'memref' with a store operation.
- unsigned getIncomingMemRefAccesses(unsigned id, Value *memref) {
+ unsigned getIncomingMemRefAccesses(unsigned id, ValuePtr memref) {
unsigned inEdgeCount = 0;
if (inEdges.count(id) > 0)
for (auto &inEdge : inEdges[id])
@@ -474,7 +474,7 @@ public:
// Returns the output edge count for node 'id' and 'memref' (if non-null),
// otherwise returns the total output edge count from node 'id'.
- unsigned getOutEdgeCount(unsigned id, Value *memref = nullptr) {
+ unsigned getOutEdgeCount(unsigned id, ValuePtr memref = nullptr) {
unsigned outEdgeCount = 0;
if (outEdges.count(id) > 0)
for (auto &outEdge : outEdges[id])
@@ -548,7 +548,7 @@ public:
// Updates edge mappings from node 'srcId' to node 'dstId' after 'oldMemRef'
// has been replaced in node at 'dstId' by a private memref depending
// on the value of 'createPrivateMemRef'.
- void updateEdges(unsigned srcId, unsigned dstId, Value *oldMemRef,
+ void updateEdges(unsigned srcId, unsigned dstId, ValuePtr oldMemRef,
bool createPrivateMemRef) {
// For each edge in 'inEdges[srcId]': add new edge remapping to 'dstId'.
if (inEdges.count(srcId) > 0) {
@@ -681,7 +681,7 @@ public:
// TODO(andydavis) Add support for taking a Block arg to construct the
// dependence graph at a different depth.
bool MemRefDependenceGraph::init(FuncOp f) {
- DenseMap<Value *, SetVector<unsigned>> memrefAccesses;
+ DenseMap<ValuePtr, SetVector<unsigned>> memrefAccesses;
// TODO: support multi-block functions.
if (f.getBlocks().size() != 1)
@@ -701,12 +701,12 @@ bool MemRefDependenceGraph::init(FuncOp f) {
Node node(nextNodeId++, &op);
for (auto *opInst : collector.loadOpInsts) {
node.loads.push_back(opInst);
- auto *memref = cast<AffineLoadOp>(opInst).getMemRef();
+ auto memref = cast<AffineLoadOp>(opInst).getMemRef();
memrefAccesses[memref].insert(node.id);
}
for (auto *opInst : collector.storeOpInsts) {
node.stores.push_back(opInst);
- auto *memref = cast<AffineStoreOp>(opInst).getMemRef();
+ auto memref = cast<AffineStoreOp>(opInst).getMemRef();
memrefAccesses[memref].insert(node.id);
}
forToNodeMap[&op] = node.id;
@@ -715,14 +715,14 @@ bool MemRefDependenceGraph::init(FuncOp f) {
// Create graph node for top-level load op.
Node node(nextNodeId++, &op);
node.loads.push_back(&op);
- auto *memref = cast<AffineLoadOp>(op).getMemRef();
+ auto memref = cast<AffineLoadOp>(op).getMemRef();
memrefAccesses[memref].insert(node.id);
nodes.insert({node.id, node});
} else if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
// Create graph node for top-level store op.
Node node(nextNodeId++, &op);
node.stores.push_back(&op);
- auto *memref = cast<AffineStoreOp>(op).getMemRef();
+ auto memref = cast<AffineStoreOp>(op).getMemRef();
memrefAccesses[memref].insert(node.id);
nodes.insert({node.id, node});
} else if (op.getNumRegions() != 0) {
@@ -743,7 +743,7 @@ bool MemRefDependenceGraph::init(FuncOp f) {
if (!node.loads.empty() || !node.stores.empty())
continue;
auto *opInst = node.op;
- for (auto *value : opInst->getResults()) {
+ for (auto value : opInst->getResults()) {
for (auto *user : value->getUsers()) {
SmallVector<AffineForOp, 4> loops;
getLoopIVs(*user, &loops);
@@ -777,7 +777,7 @@ bool MemRefDependenceGraph::init(FuncOp f) {
// Removes load operations from 'srcLoads' which operate on 'memref', and
// adds them to 'dstLoads'.
-static void moveLoadsAccessingMemrefTo(Value *memref,
+static void moveLoadsAccessingMemrefTo(ValuePtr memref,
SmallVectorImpl<Operation *> *srcLoads,
SmallVectorImpl<Operation *> *dstLoads) {
dstLoads->clear();
@@ -893,10 +893,11 @@ static unsigned getMemRefEltSizeInBytes(MemRefType memRefType) {
// MemRefRegion written to by 'srcStoreOpInst' at depth 'dstLoopDepth'.
// TODO(bondhugula): consider refactoring the common code from generateDma and
// this one.
-static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
- unsigned dstLoopDepth,
- Optional<unsigned> fastMemorySpace,
- uint64_t localBufSizeThreshold) {
+static ValuePtr createPrivateMemRef(AffineForOp forOp,
+ Operation *srcStoreOpInst,
+ unsigned dstLoopDepth,
+ Optional<unsigned> fastMemorySpace,
+ uint64_t localBufSizeThreshold) {
auto *forInst = forOp.getOperation();
// Create builder to insert alloc op just before 'forOp'.
@@ -904,7 +905,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
// Builder to create constants at the top level.
OpBuilder top(forInst->getParentOfType<FuncOp>().getBody());
// Create new memref type based on slice bounds.
- auto *oldMemRef = cast<AffineStoreOp>(srcStoreOpInst).getMemRef();
+ auto oldMemRef = cast<AffineStoreOp>(srcStoreOpInst).getMemRef();
auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
unsigned rank = oldMemRefType.getRank();
@@ -928,7 +929,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
// 'outerIVs' holds the values that this memory region is symbolic/parametric
// on; this would correspond to loop IVs surrounding the level at which the
// slice is being materialized.
- SmallVector<Value *, 8> outerIVs;
+ SmallVector<ValuePtr, 8> outerIVs;
cst->getIdValues(rank, cst->getNumIds(), &outerIVs);
// Build 'rank' AffineExprs from MemRefRegion 'lbs'
@@ -960,7 +961,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
auto newMemRefType = MemRefType::get(newShape, oldMemRefType.getElementType(),
{}, newMemSpace);
// Gather alloc operands for the dynamic dimensions of the memref.
- SmallVector<Value *, 4> allocOperands;
+ SmallVector<ValuePtr, 4> allocOperands;
unsigned dynamicDimCount = 0;
for (auto dimSize : oldMemRefType.getShape()) {
if (dimSize == -1)
@@ -973,7 +974,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
// consumer loop nests to reduce their live range. Currently they are added
// at the beginning of the function, because loop nests can be reordered
// during the fusion pass.
- Value *newMemRef =
+ ValuePtr newMemRef =
top.create<AllocOp>(forOp.getLoc(), newMemRefType, allocOperands);
// Build an AffineMap to remap access functions based on lower bound offsets.
@@ -1016,7 +1017,7 @@ static bool canFuseSrcWhichWritesToLiveOut(unsigned srcId, unsigned dstId,
MemRefDependenceGraph *mdg) {
assert(srcLiveOutStoreOp && "Expected a valid store op");
auto *dstNode = mdg->getNode(dstId);
- Value *memref = srcLiveOutStoreOp.getMemRef();
+ ValuePtr memref = srcLiveOutStoreOp.getMemRef();
// Return false if 'srcNode' has more than one output edge on 'memref'.
if (mdg->getOutEdgeCount(srcId, memref) > 1)
return false;
@@ -1495,10 +1496,10 @@ public:
SmallVector<Operation *, 4> loads = dstNode->loads;
SmallVector<Operation *, 4> dstLoadOpInsts;
- DenseSet<Value *> visitedMemrefs;
+ DenseSet<ValuePtr> visitedMemrefs;
while (!loads.empty()) {
// Get memref of load on top of the stack.
- auto *memref = cast<AffineLoadOp>(loads.back()).getMemRef();
+ auto memref = cast<AffineLoadOp>(loads.back()).getMemRef();
if (visitedMemrefs.count(memref) > 0)
continue;
visitedMemrefs.insert(memref);
@@ -1653,7 +1654,7 @@ public:
}
// TODO(andydavis) Use union of memref write regions to compute
// private memref footprint.
- auto *newMemRef = createPrivateMemRef(
+ auto newMemRef = createPrivateMemRef(
dstAffineForOp, storesForMemref[0], bestDstLoopDepth,
fastMemorySpace, localBufSizeThreshold);
visitedMemrefs.insert(newMemRef);
@@ -1671,7 +1672,7 @@ public:
// Add new load ops to current Node load op list 'loads' to
// continue fusing based on new operands.
for (auto *loadOpInst : dstLoopCollector.loadOpInsts) {
- auto *loadMemRef = cast<AffineLoadOp>(loadOpInst).getMemRef();
+ auto loadMemRef = cast<AffineLoadOp>(loadOpInst).getMemRef();
if (visitedMemrefs.count(loadMemRef) == 0)
loads.push_back(loadOpInst);
}
@@ -1737,10 +1738,10 @@ public:
// Attempt to fuse 'dstNode' with sibling nodes in the graph.
void fuseWithSiblingNodes(Node *dstNode) {
DenseSet<unsigned> visitedSibNodeIds;
- std::pair<unsigned, Value *> idAndMemref;
+ std::pair<unsigned, ValuePtr> idAndMemref;
while (findSiblingNodeToFuse(dstNode, &visitedSibNodeIds, &idAndMemref)) {
unsigned sibId = idAndMemref.first;
- Value *memref = idAndMemref.second;
+ ValuePtr memref = idAndMemref.second;
// TODO(andydavis) Check that 'sibStoreOpInst' post-dominates all other
// stores to the same memref in 'sibNode' loop nest.
auto *sibNode = mdg->getNode(sibId);
@@ -1804,10 +1805,10 @@ public:
// 'idAndMemrefToFuse' on success. Returns false otherwise.
bool findSiblingNodeToFuse(Node *dstNode,
DenseSet<unsigned> *visitedSibNodeIds,
- std::pair<unsigned, Value *> *idAndMemrefToFuse) {
+ std::pair<unsigned, ValuePtr> *idAndMemrefToFuse) {
// Returns true if 'sibNode' can be fused with 'dstNode' for input reuse
// on 'memref'.
- auto canFuseWithSibNode = [&](Node *sibNode, Value *memref) {
+ auto canFuseWithSibNode = [&](Node *sibNode, ValuePtr memref) {
// Skip if 'outEdge' is not a read-after-write dependence.
// TODO(andydavis) Remove restrict to single load op restriction.
if (sibNode->getLoadOpCount(memref) != 1)
@@ -1819,15 +1820,15 @@ public:
return false;
// Skip sib node if it loads to (and stores from) the same memref on
// which it also has an input dependence edge.
- DenseSet<Value *> loadAndStoreMemrefSet;
+ DenseSet<ValuePtr> loadAndStoreMemrefSet;
sibNode->getLoadAndStoreMemrefSet(&loadAndStoreMemrefSet);
- if (llvm::any_of(loadAndStoreMemrefSet, [=](Value *memref) {
+ if (llvm::any_of(loadAndStoreMemrefSet, [=](ValuePtr memref) {
return mdg->getIncomingMemRefAccesses(sibNode->id, memref) > 0;
}))
return false;
// Check that all stores are to the same memref.
- DenseSet<Value *> storeMemrefs;
+ DenseSet<ValuePtr> storeMemrefs;
for (auto *storeOpInst : sibNode->stores) {
storeMemrefs.insert(cast<AffineStoreOp>(storeOpInst).getMemRef());
}
@@ -1856,7 +1857,7 @@ public:
if (visitedSibNodeIds->count(sibNode->id) > 0)
continue;
// Skip 'use' if it does not load from the same memref as 'dstNode'.
- auto *memref = loadOp.getMemRef();
+ auto memref = loadOp.getMemRef();
if (dstNode->getLoadOpCount(memref) == 0)
continue;
// Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
@@ -1950,7 +1951,7 @@ public:
for (auto &pair : mdg->memrefEdgeCount) {
if (pair.second > 0)
continue;
- auto *memref = pair.first;
+ auto memref = pair.first;
// Skip if there exist other uses (return operation or function calls).
if (!memref->use_empty())
continue;
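LoopFusion keys a number of containers on the memref value (memrefEdgeCount, memrefAccesses, visitedMemrefs), and the hunks above only respell the key type. Assuming ValuePtr stays pointer-like, DenseMap and DenseSet behave exactly as with Value * keys; a short sketch against LLVM's ADT headers (toy key type, not the MLIR class):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/ADT/DenseSet.h"

    struct Value { int id; };      // stand-in; the member keeps pointer alignment > 1
    using ValuePtr = Value *;

    int main() {
      Value a{0}, b{1};
      llvm::DenseMap<ValuePtr, unsigned> memrefEdgeCount;  // same behavior as with Value * keys
      llvm::DenseSet<ValuePtr> visitedMemrefs;
      memrefEdgeCount[&a] += 1;
      visitedMemrefs.insert(&b);
      return (memrefEdgeCount.lookup(&a) == 1 && visitedMemrefs.count(&b)) ? 0 : 1;
    }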
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 4932494a04b..bd58827d001 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -50,7 +50,7 @@ public:
// - the op has no side-effects. If sideEffecting is Never, sideeffects of this
// op and its nested ops are ignored.
static bool canBeHoisted(Operation *op,
- function_ref<bool(Value *)> definedOutside,
+ function_ref<bool(ValuePtr)> definedOutside,
SideEffecting sideEffecting,
SideEffectsInterface &interface) {
// Check that dependencies are defined outside of loop.
@@ -92,7 +92,7 @@ static LogicalResult moveLoopInvariantCode(LoopLikeOpInterface looplike,
SmallVector<Operation *, 8> opsToMove;
// Helper to check whether an operation is loop invariant wrt. SSA properties.
- auto isDefinedOutsideOfBody = [&](Value *value) {
+ auto isDefinedOutsideOfBody = [&](ValuePtr value) {
auto definingOp = value->getDefiningOp();
return (definingOp && !!willBeMovedSet.count(definingOp)) ||
looplike.isDefinedOutsideOfLoop(value);
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 10654783aa9..361a4d8ecb9 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -120,8 +120,8 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
for (unsigned i = 0; i < width; i++) {
auto lbOperands = origLoops[i].getLowerBoundOperands();
auto ubOperands = origLoops[i].getUpperBoundOperands();
- SmallVector<Value *, 4> newLbOperands(lbOperands);
- SmallVector<Value *, 4> newUbOperands(ubOperands);
+ SmallVector<ValuePtr, 4> newLbOperands(lbOperands);
+ SmallVector<ValuePtr, 4> newUbOperands(ubOperands);
newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
newLoops[i].setStep(tileSizes[i]);
@@ -147,7 +147,7 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
// with 'i' (tile-space loop) appended to it. The new upper bound map is
// the original one with an additional expression i + tileSize appended.
auto ub = origLoops[i].getUpperBound();
- SmallVector<Value *, 4> ubOperands;
+ SmallVector<ValuePtr, 4> ubOperands;
ubOperands.reserve(ub.getNumOperands() + 1);
auto origUbMap = ub.getMap();
// Add dim operands from original upper bound.
@@ -235,9 +235,10 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
// Move the loop body of the original nest to the new one.
moveLoopBody(origLoops[origLoops.size() - 1], innermostPointLoop);
- SmallVector<Value *, 8> origLoopIVs;
+ SmallVector<ValuePtr, 8> origLoopIVs;
extractForInductionVars(band, &origLoopIVs);
- SmallVector<Optional<Value *>, 6> ids(origLoopIVs.begin(), origLoopIVs.end());
+ SmallVector<Optional<ValuePtr>, 6> ids(origLoopIVs.begin(),
+ origLoopIVs.end());
FlatAffineConstraints cst;
getIndexSet(band, &cst);
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 230869abcd5..a857b8ec95a 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -191,7 +191,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
// Adjust the lower bound of the cleanup loop; its upper bound is the same
// as the original loop's upper bound.
AffineMap cleanupMap;
- SmallVector<Value *, 4> cleanupOperands;
+ SmallVector<ValuePtr, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollJamFactor, &cleanupMap,
&cleanupOperands, builder);
cleanupAffineForOp.setLowerBound(cleanupOperands, cleanupMap);
@@ -208,7 +208,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
int64_t step = forOp.getStep();
forOp.setStep(step * unrollJamFactor);
- auto *forOpIV = forOp.getInductionVar();
+ auto forOpIV = forOp.getInductionVar();
// Unroll and jam (appends unrollJamFactor - 1 additional copies).
for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
// Operand map persists across all sub-blocks.
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index c531ca551b4..0695aafe171 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -76,7 +76,7 @@ struct MemRefDataFlowOpt : public FunctionPass<MemRefDataFlowOpt> {
void forwardStoreToLoad(AffineLoadOp loadOp);
// A list of memref's that are potentially dead / could be eliminated.
- SmallPtrSet<Value *, 4> memrefsToErase;
+ SmallPtrSet<ValuePtr, 4> memrefsToErase;
// Load op's whose results were replaced by those forwarded from stores.
SmallVector<Operation *, 8> loadOpsToErase;
@@ -180,7 +180,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
return;
// Perform the actual store to load forwarding.
- Value *storeVal = cast<AffineStoreOp>(lastWriteStoreOp).getValueToStore();
+ ValuePtr storeVal = cast<AffineStoreOp>(lastWriteStoreOp).getValueToStore();
loadOp.replaceAllUsesWith(storeVal);
// Record the memref for a later sweep to optimize away.
memrefsToErase.insert(loadOp.getMemRef());
@@ -213,7 +213,7 @@ void MemRefDataFlowOpt::runOnFunction() {
// Check if the store fwd'ed memrefs are now left with only stores and can
// thus be completely deleted. Note: the canonicalize pass should be able
// to do this as well, but we'll do it here since we collected these anyway.
- for (auto *memref : memrefsToErase) {
+ for (auto memref : memrefsToErase) {
// If the memref hasn't been alloc'ed in this function, skip.
Operation *defInst = memref->getDefiningOp();
if (!defInst || !isa<AllocOp>(defInst))
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index fdf01351549..4162936ea2d 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -70,7 +70,7 @@ static unsigned getTagMemRefPos(Operation &dmaInst) {
/// Replaces all uses of the old memref by the new one while indexing the newly
/// added dimension by the loop IV of the specified 'affine.for' operation
/// modulo 2. Returns false if such a replacement cannot be performed.
-static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
+static bool doubleBuffer(ValuePtr oldMemRef, AffineForOp forOp) {
auto *forBody = forOp.getBody();
OpBuilder bInner(forBody, forBody->begin());
@@ -94,7 +94,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
auto *forInst = forOp.getOperation();
OpBuilder bOuter(forInst);
// Put together alloc operands for any dynamic dimensions of the memref.
- SmallVector<Value *, 4> allocOperands;
+ SmallVector<ValuePtr, 4> allocOperands;
unsigned dynamicDimCount = 0;
for (auto dimSize : oldMemRefType.getShape()) {
if (dimSize == -1)
@@ -103,7 +103,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
}
// Create and place the alloc right before the 'affine.for' operation.
- Value *newMemRef =
+ ValuePtr newMemRef =
bOuter.create<AllocOp>(forInst->getLoc(), newMemRefType, allocOperands);
// Create 'iv mod 2' value to index the leading dimension.
@@ -212,7 +212,7 @@ static void findMatchingStartFinishInsts(
continue;
// We only double buffer if the buffer is not live out of loop.
- auto *memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos());
+ auto memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos());
bool escapingUses = false;
for (auto *user : memref->getUsers()) {
// We can double buffer regardless of dealloc's outside the loop.
@@ -270,7 +270,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
// dimension.
for (auto &pair : startWaitPairs) {
auto *dmaStartInst = pair.first;
- Value *oldMemRef = dmaStartInst->getOperand(
+ ValuePtr oldMemRef = dmaStartInst->getOperand(
cast<AffineDmaStartOp>(dmaStartInst).getFasterMemPos());
if (!doubleBuffer(oldMemRef, forOp)) {
// Normally, double buffering should not fail because we already checked
@@ -301,7 +301,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
// Double the buffers for tag memrefs.
for (auto &pair : startWaitPairs) {
auto *dmaFinishInst = pair.second;
- Value *oldTagMemRef =
+ ValuePtr oldTagMemRef =
dmaFinishInst->getOperand(getTagMemRefPos(*dmaFinishInst));
if (!doubleBuffer(oldTagMemRef, forOp)) {
LLVM_DEBUG(llvm::dbgs() << "tag double buffering failed\n";);
@@ -342,7 +342,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
// If a slice wasn't created, the reachable affine.apply op's from its
// operands are the ones that go with it.
SmallVector<Operation *, 4> affineApplyInsts;
- SmallVector<Value *, 4> operands(dmaStartInst->getOperands());
+ SmallVector<ValuePtr, 4> operands(dmaStartInst->getOperands());
getReachableAffineApplyOps(operands, affineApplyInsts);
for (auto *op : affineApplyInsts) {
instShiftMap[op] = 0;
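The doubleBuffer changes above leave the transformation itself untouched: the buffer gains a leading dimension of two and every access indexes that dimension by the loop IV modulo 2, which is what lets the transfer for the next iteration fill one slot while the current iteration uses the other. A plain C++ analogy of the indexing scheme (illustrative only, no MLIR types):

    #include <array>
    #include <cstdio>

    int main() {
      std::array<std::array<int, 4>, 2> buf{};  // leading dimension of 2: the double buffer
      for (int iv = 0; iv < 6; ++iv) {
        int slot = iv % 2;                      // index the new leading dimension by iv mod 2
        buf[slot][0] = iv;                      // the incoming transfer lands in the current slot
        std::printf("iteration %d uses slot %d\n", iv, slot);
      }
      return 0;
    }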
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index d4b7caae527..85d1f21305e 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -90,7 +90,7 @@ LogicalResult OperationFolder::tryToFold(
return failure();
// Try to fold the operation.
- SmallVector<Value *, 8> results;
+ SmallVector<ValuePtr, 8> results;
if (failed(tryToFold(op, results, processGeneratedConstants)))
return failure();
@@ -138,7 +138,7 @@ void OperationFolder::notifyRemoval(Operation *op) {
/// Tries to perform folding on the given `op`. If successful, populates
/// `results` with the results of the folding.
LogicalResult OperationFolder::tryToFold(
- Operation *op, SmallVectorImpl<Value *> &results,
+ Operation *op, SmallVectorImpl<ValuePtr> &results,
function_ref<void(Operation *)> processGeneratedConstants) {
SmallVector<Attribute, 8> operandConstants;
SmallVector<OpFoldResult, 8> foldResults;
@@ -181,13 +181,13 @@ LogicalResult OperationFolder::tryToFold(
assert(!foldResults[i].isNull() && "expected valid OpFoldResult");
// Check if the result was an SSA value.
- if (auto *repl = foldResults[i].dyn_cast<Value *>()) {
+ if (auto repl = foldResults[i].dyn_cast<ValuePtr>()) {
results.emplace_back(repl);
continue;
}
// Check to see if there is a canonicalized version of this constant.
- auto *res = op->getResult(i);
+ auto res = op->getResult(i);
Attribute attrRepl = foldResults[i].get<Attribute>();
if (auto *constOp =
tryGetOrCreateConstant(uniquedConstants, dialect, builder, attrRepl,
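The tryToFold hunk above distinguishes the two kinds of fold results: an existing SSA value, recovered with dyn_cast<ValuePtr>, or a constant Attribute, recovered with get<Attribute>. A compact stand-alone sketch of that consumption pattern, assuming OpFoldResult is a pointer union over those two cases as the calls suggest (the stand-in types below are not the real MLIR classes):

    #include "llvm/ADT/PointerUnion.h"
    #include "llvm/ADT/SmallVector.h"

    struct Value { int id; };             // stand-ins; members keep pointer alignment > 1
    struct AttributeStorage { int id; };
    using ValuePtr = Value *;
    using Attribute = AttributeStorage *;
    using OpFoldResult = llvm::PointerUnion<Attribute, ValuePtr>;  // assumed shape

    int main() {
      Value v{0};
      AttributeStorage a{1};
      llvm::SmallVector<OpFoldResult, 2> foldResults;
      foldResults.push_back(ValuePtr(&v));
      foldResults.push_back(Attribute(&a));

      llvm::SmallVector<ValuePtr, 2> results;
      for (OpFoldResult folded : foldResults) {
        if (ValuePtr repl = folded.dyn_cast<ValuePtr>()) {
          results.push_back(repl);                // folding produced an existing SSA value
          continue;
        }
        Attribute cst = folded.get<Attribute>();  // otherwise folding produced a constant
        (void)cst;  // a constant op would be created or re-used from it, as tryToFold does
      }
      return results.size() == 1 ? 0 : 1;
    }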
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index e2ca3f8fc5e..fe4a6f9f9e0 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -107,7 +107,7 @@ protected:
// simplifications to its users - make sure to add them to the worklist
// before the root is changed.
void notifyRootReplaced(Operation *op) override {
- for (auto *result : op->getResults())
+ for (auto result : op->getResults())
for (auto *user : result->getUsers())
addToWorklist(user);
}
@@ -118,7 +118,7 @@ private:
// operation is modified or removed, as it may trigger further
// simplifications.
template <typename Operands> void addToWorklist(Operands &&operands) {
- for (Value *operand : operands) {
+ for (ValuePtr operand : operands) {
// If the use count of this operand is now < 2, we re-add the defining
// operation to the worklist.
// TODO(riverriddle) This is based on the fact that zero use operations
@@ -160,7 +160,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef<Region> regions,
region.walk(collectOps);
// These are scratch vectors used in the folding loop below.
- SmallVector<Value *, 8> originalOperands, resultValues;
+ SmallVector<ValuePtr, 8> originalOperands, resultValues;
changed = false;
while (!worklist.empty()) {
@@ -189,7 +189,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef<Region> regions,
// Add all the users of the result to the worklist so we make sure
// to revisit them.
- for (auto *result : op->getResults())
+ for (auto result : op->getResults())
for (auto *operand : result->getUsers())
addToWorklist(operand);
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
index e8466aa3fd6..048130c0d3a 100644
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -55,7 +55,7 @@ static void remapInlinedOperands(iterator_range<Region::iterator> inlinedBlocks,
BlockAndValueMapping &mapper) {
auto remapOperands = [&](Operation *op) {
for (auto &operand : op->getOpOperands())
- if (auto *mappedOp = mapper.lookupOrNull(operand.get()))
+ if (auto mappedOp = mapper.lookupOrNull(operand.get()))
operand.set(mappedOp);
};
for (auto &block : inlinedBlocks)
@@ -98,7 +98,7 @@ void InlinerInterface::handleTerminator(Operation *op, Block *newDest) const {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void InlinerInterface::handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const {
+ ArrayRef<ValuePtr> valuesToRepl) const {
auto *handler = getInterfaceFor(op);
assert(handler && "expected valid dialect handler");
handler->handleTerminator(op, valuesToRepl);
@@ -137,7 +137,7 @@ static bool isLegalToInline(InlinerInterface &interface, Region *src,
LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
BlockAndValueMapping &mapper,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc,
bool shouldCloneInlinedRegion) {
// We expect the region to have at least one block.
@@ -147,7 +147,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
// Check that all of the region arguments have been mapped.
auto *srcEntryBlock = &src->front();
if (llvm::any_of(srcEntryBlock->getArguments(),
- [&](BlockArgument *arg) { return !mapper.contains(arg); }))
+ [&](BlockArgumentPtr arg) { return !mapper.contains(arg); }))
return failure();
// The insertion point must be within a block.
@@ -207,7 +207,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
} else {
// Otherwise, there were multiple blocks inlined. Add arguments to the post
// insertion block to represent the results to replace.
- for (Value *resultToRepl : resultsToReplace) {
+ for (ValuePtr resultToRepl : resultsToReplace) {
resultToRepl->replaceAllUsesWith(
postInsertBlock->addArgument(resultToRepl->getType()));
}
@@ -229,8 +229,8 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
/// in-favor of the region arguments when inlining.
LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
- ArrayRef<Value *> inlinedOperands,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> inlinedOperands,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc,
bool shouldCloneInlinedRegion) {
// We expect the region to have at least one block.
@@ -246,7 +246,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
for (unsigned i = 0, e = inlinedOperands.size(); i != e; ++i) {
// Verify that the types of the provided values match the function argument
// types.
- BlockArgument *regionArg = entryBlock->getArgument(i);
+ BlockArgumentPtr regionArg = entryBlock->getArgument(i);
if (inlinedOperands[i]->getType() != regionArg->getType())
return failure();
mapper.map(regionArg, inlinedOperands[i]);
@@ -259,10 +259,10 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
/// Utility function used to generate a cast operation from the given interface,
/// or return nullptr if a cast could not be generated.
-static Value *materializeConversion(const DialectInlinerInterface *interface,
- SmallVectorImpl<Operation *> &castOps,
- OpBuilder &castBuilder, Value *arg,
- Type type, Location conversionLoc) {
+static ValuePtr materializeConversion(const DialectInlinerInterface *interface,
+ SmallVectorImpl<Operation *> &castOps,
+ OpBuilder &castBuilder, ValuePtr arg,
+ Type type, Location conversionLoc) {
if (!interface)
return nullptr;
@@ -297,8 +297,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Make sure that the number of arguments and results matchup between the call
// and the region.
- SmallVector<Value *, 8> callOperands(call.getArgOperands());
- SmallVector<Value *, 8> callResults(call.getOperation()->getResults());
+ SmallVector<ValuePtr, 8> callOperands(call.getArgOperands());
+ SmallVector<ValuePtr, 8> callResults(call.getOperation()->getResults());
if (callOperands.size() != entryBlock->getNumArguments() ||
callResults.size() != callableResultTypes.size())
return failure();
@@ -325,8 +325,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Map the provided call operands to the arguments of the region.
BlockAndValueMapping mapper;
for (unsigned i = 0, e = callOperands.size(); i != e; ++i) {
- BlockArgument *regionArg = entryBlock->getArgument(i);
- Value *operand = callOperands[i];
+ BlockArgumentPtr regionArg = entryBlock->getArgument(i);
+ ValuePtr operand = callOperands[i];
// If the call operand doesn't match the expected region argument, try to
// generate a cast.
@@ -342,13 +342,13 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Ensure that the resultant values of the call, match the callable.
castBuilder.setInsertionPointAfter(call);
for (unsigned i = 0, e = callResults.size(); i != e; ++i) {
- Value *callResult = callResults[i];
+ ValuePtr callResult = callResults[i];
if (callResult->getType() == callableResultTypes[i])
continue;
// Generate a conversion that will produce the original type, so that the IR
// is still valid after the original call gets replaced.
- Value *castResult =
+ ValuePtr castResult =
materializeConversion(callInterface, castOps, castBuilder, callResult,
callResult->getType(), castLoc);
if (!castResult)
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index fd803390ce7..d5cda3265de 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -45,7 +45,7 @@ using namespace mlir;
// Gathers all load and store memref accesses in 'opA' into 'values', where
// 'values[memref] == true' for each store operation.
static void getLoadAndStoreMemRefAccesses(Operation *opA,
- DenseMap<Value *, bool> &values) {
+ DenseMap<ValuePtr, bool> &values) {
opA->walk([&](Operation *op) {
if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
if (values.count(loadOp.getMemRef()) == 0)
@@ -60,7 +60,7 @@ static void getLoadAndStoreMemRefAccesses(Operation *opA,
// accessed 'values' and at least one of the access is a store operation.
// Returns false otherwise.
static bool isDependentLoadOrStoreOp(Operation *op,
- DenseMap<Value *, bool> &values) {
+ DenseMap<ValuePtr, bool> &values) {
if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
return values.count(loadOp.getMemRef()) > 0 &&
values[loadOp.getMemRef()] == true;
@@ -75,7 +75,7 @@ static bool isDependentLoadOrStoreOp(Operation *op,
static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) {
// Record memref values from all loads/store in loop nest rooted at 'opA'.
// Map from memref value to bool which is true if store, false otherwise.
- DenseMap<Value *, bool> values;
+ DenseMap<ValuePtr, bool> values;
getLoadAndStoreMemRefAccesses(opA, values);
// For each 'opX' in block in range ('opA', 'opB'), check if there is a data
@@ -101,7 +101,7 @@ static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) {
static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
// Record memref values from all loads/store in loop nest rooted at 'opB'.
// Map from memref value to bool which is true if store, false otherwise.
- DenseMap<Value *, bool> values;
+ DenseMap<ValuePtr, bool> values;
getLoadAndStoreMemRefAccesses(opB, values);
// For each 'opX' in block in range ('opA', 'opB') in reverse order,
@@ -121,8 +121,8 @@ static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
}
return WalkResult::advance();
}
- for (auto *value : op->getResults()) {
- for (auto *user : value->getUsers()) {
+ for (auto value : op->getResults()) {
+ for (auto user : value->getUsers()) {
SmallVector<AffineForOp, 4> loops;
// Check if any loop in loop nest surrounding 'user' is 'opB'.
getLoopIVs(*user, &loops);
@@ -443,7 +443,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
// Subtract from operation count the loads/store we expect load/store
// forwarding to remove.
unsigned storeCount = 0;
- llvm::SmallDenseSet<Value *, 4> storeMemrefs;
+ llvm::SmallDenseSet<ValuePtr, 4> storeMemrefs;
srcForOp.walk([&](Operation *op) {
if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
storeMemrefs.insert(storeOp.getMemRef());
@@ -455,7 +455,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
computeCostMap[insertPointParent] = -storeCount;
// Subtract out any load users of 'storeMemrefs' nested below
// 'insertPointParent'.
- for (auto *value : storeMemrefs) {
+ for (auto value : storeMemrefs) {
for (auto *user : value->getUsers()) {
if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
SmallVector<AffineForOp, 4> loops;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 3691aee4870..bc1ced408a9 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -52,7 +52,7 @@ using llvm::SmallMapVector;
/// expression.
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
- SmallVectorImpl<Value *> *operands,
+ SmallVectorImpl<ValuePtr> *operands,
OpBuilder &b) {
auto lbMap = forOp.getLowerBoundMap();
@@ -63,7 +63,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
}
AffineMap tripCountMap;
- SmallVector<Value *, 4> tripCountOperands;
+ SmallVector<ValuePtr, 4> tripCountOperands;
buildTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands);
// Sometimes the trip count cannot be expressed as an affine expression.
@@ -82,7 +82,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
// lb + tr1 - tr1 % ufactor, lb + tr2 - tr2 % ufactor; the results of all
// these affine.apply's make up the cleanup loop lower bound.
SmallVector<AffineExpr, 4> bumpExprs(tripCountMap.getNumResults());
- SmallVector<Value *, 4> bumpValues(tripCountMap.getNumResults());
+ SmallVector<ValuePtr, 4> bumpValues(tripCountMap.getNumResults());
for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
auto tripCountExpr = tripCountMap.getResult(i);
bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
@@ -105,7 +105,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
*map = simplifyAffineMap(*map);
canonicalizeMapAndOperands(map, operands);
// Remove any affine.apply's that became dead from the simplification above.
- for (auto *v : bumpValues) {
+ for (auto v : bumpValues) {
if (v->use_empty()) {
v->getDefiningOp()->erase();
}
@@ -127,7 +127,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
return failure();
// Replaces all IV uses to its single iteration value.
- auto *iv = forOp.getInductionVar();
+ auto iv = forOp.getInductionVar();
Operation *op = forOp.getOperation();
if (!iv->use_empty()) {
if (forOp.hasConstantLowerBound()) {
@@ -137,7 +137,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
iv->replaceAllUsesWith(constOp);
} else {
AffineBound lb = forOp.getLowerBound();
- SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end());
+ SmallVector<ValuePtr, 4> lbOperands(lb.operand_begin(), lb.operand_end());
OpBuilder builder(op->getBlock(), Block::iterator(op));
if (lb.getMap() == builder.getDimIdentityMap()) {
// No need of generating an affine.apply.
@@ -178,8 +178,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
&instGroupQueue,
unsigned offset, AffineForOp srcForInst, OpBuilder b) {
- SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
- SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(srcForInst.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(srcForInst.getUpperBoundOperands());
assert(lbMap.getNumInputs() == lbOperands.size());
assert(ubMap.getNumInputs() == ubOperands.size());
@@ -187,8 +187,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
auto loopChunk =
b.create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
ubMap, srcForInst.getStep());
- auto *loopChunkIV = loopChunk.getInductionVar();
- auto *srcIV = srcForInst.getInductionVar();
+ auto loopChunkIV = loopChunk.getInductionVar();
+ auto srcIV = srcForInst.getInductionVar();
BlockAndValueMapping operandMap;
@@ -449,7 +449,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
OpBuilder builder(op->getBlock(), ++Block::iterator(op));
auto cleanupForInst = cast<AffineForOp>(builder.clone(*op));
AffineMap cleanupMap;
- SmallVector<Value *, 4> cleanupOperands;
+ SmallVector<ValuePtr, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollFactor, &cleanupMap, &cleanupOperands,
builder);
assert(cleanupMap &&
@@ -477,7 +477,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
Block::iterator srcBlockEnd = std::prev(forOp.getBody()->end(), 2);
// Unroll the contents of 'forOp' (append unrollFactor-1 additional copies).
- auto *forOpIV = forOp.getInductionVar();
+ auto forOpIV = forOp.getInductionVar();
for (unsigned i = 1; i < unrollFactor; i++) {
BlockAndValueMapping operandMap;
@@ -669,8 +669,8 @@ void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
// ...
// }
// ```
-static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map,
- SmallVector<Value *, 4> *operands,
+static void augmentMapAndBounds(OpBuilder &b, ValuePtr iv, AffineMap *map,
+ SmallVector<ValuePtr, 4> *operands,
int64_t offset = 0) {
auto bounds = llvm::to_vector<4>(map->getResults());
bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
@@ -699,16 +699,16 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
// Lower-bound map creation.
auto lbMap = forOp.getLowerBoundMap();
- SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands);
// Upper-bound map creation.
auto ubMap = forOp.getUpperBoundMap();
- SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands,
/*offset=*/scaledStep);
- auto *iv = forOp.getInductionVar();
+ auto iv = forOp.getInductionVar();
SmallVector<AffineForOp, 8> innerLoops;
for (auto t : targets) {
// Insert newForOp before the terminator of `t`.
@@ -729,10 +729,10 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
return innerLoops;
}
-static Loops stripmineSink(loop::ForOp forOp, Value *factor,
+static Loops stripmineSink(loop::ForOp forOp, ValuePtr factor,
ArrayRef<loop::ForOp> targets) {
- auto *originalStep = forOp.step();
- auto *iv = forOp.getInductionVar();
+ auto originalStep = forOp.step();
+ auto iv = forOp.getInductionVar();
OpBuilder b(forOp);
forOp.setStep(b.create<MulIOp>(forOp.getLoc(), originalStep, factor));
@@ -745,10 +745,10 @@ static Loops stripmineSink(loop::ForOp forOp, Value *factor,
// Insert newForOp before the terminator of `t`.
OpBuilder b(t.getBodyBuilder());
- Value *stepped = b.create<AddIOp>(t.getLoc(), iv, forOp.step());
- Value *less = b.create<CmpIOp>(t.getLoc(), CmpIPredicate::slt,
- forOp.upperBound(), stepped);
- Value *ub =
+ ValuePtr stepped = b.create<AddIOp>(t.getLoc(), iv, forOp.step());
+ ValuePtr less = b.create<CmpIOp>(t.getLoc(), CmpIPredicate::slt,
+ forOp.upperBound(), stepped);
+ ValuePtr ub =
b.create<SelectOp>(t.getLoc(), less, forOp.upperBound(), stepped);
// Splice [begin, begin + nOps - 1) into `newForOp` and replace uses.
@@ -799,7 +799,7 @@ mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
}
SmallVector<Loops, 8> mlir::tile(ArrayRef<loop::ForOp> forOps,
- ArrayRef<Value *> sizes,
+ ArrayRef<ValuePtr> sizes,
ArrayRef<loop::ForOp> targets) {
return tileImpl(forOps, sizes, targets);
}
@@ -821,13 +821,13 @@ SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
return tileImpl(forOps, sizes, target);
}
-Loops mlir::tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
+Loops mlir::tile(ArrayRef<loop::ForOp> forOps, ArrayRef<ValuePtr> sizes,
loop::ForOp target) {
return tileImpl(forOps, sizes, target);
}
Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp,
- ArrayRef<Value *> sizes) {
+ ArrayRef<ValuePtr> sizes) {
// Collect perfectly nested loops. If more size values provided than nested
// loops available, truncate `sizes`.
SmallVector<loop::ForOp, 4> forOps;
@@ -842,14 +842,15 @@ Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp,
// Build the IR that performs ceil division of a positive value by a constant:
// ceildiv(a, B) = divis(a + (B-1), B)
// where divis is rounding-to-zero division.
-static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
- int64_t divisor) {
+static ValuePtr ceilDivPositive(OpBuilder &builder, Location loc,
+ ValuePtr dividend, int64_t divisor) {
assert(divisor > 0 && "expected positive divisor");
assert(dividend->getType().isIndex() && "expected index-typed value");
- Value *divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1);
- Value *divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
- Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOneCst);
+ ValuePtr divisorMinusOneCst =
+ builder.create<ConstantIndexOp>(loc, divisor - 1);
+ ValuePtr divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
+ ValuePtr sum = builder.create<AddIOp>(loc, dividend, divisorMinusOneCst);
return builder.create<SignedDivIOp>(loc, sum, divisorCst);
}
@@ -857,13 +858,13 @@ static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
// positive value:
// ceildiv(a, b) = divis(a + (b - 1), b)
// where divis is rounding-to-zero division.
-static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
- Value *divisor) {
+static ValuePtr ceilDivPositive(OpBuilder &builder, Location loc,
+ ValuePtr dividend, ValuePtr divisor) {
assert(dividend->getType().isIndex() && "expected index-typed value");
- Value *cstOne = builder.create<ConstantIndexOp>(loc, 1);
- Value *divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
- Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOne);
+ ValuePtr cstOne = builder.create<ConstantIndexOp>(loc, 1);
+ ValuePtr divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
+ ValuePtr sum = builder.create<AddIOp>(loc, dividend, divisorMinusOne);
return builder.create<SignedDivIOp>(loc, sum, divisor);
}
@@ -945,7 +946,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
  // iterations. Given that the loop currently executes
// numIterations = ceildiv((upperBound - lowerBound), step)
// iterations, we need to tile with size ceildiv(numIterations, size[i]).
- SmallVector<Value *, 4> tileSizes;
+ SmallVector<ValuePtr, 4> tileSizes;
tileSizes.reserve(sizes.size());
for (unsigned i = 0, e = sizes.size(); i < e; ++i) {
assert(sizes[i] > 0 && "expected strictly positive size for strip-mining");
@@ -953,10 +954,10 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
auto forOp = forOps[i];
OpBuilder builder(forOp);
auto loc = forOp.getLoc();
- Value *diff =
+ ValuePtr diff =
builder.create<SubIOp>(loc, forOp.upperBound(), forOp.lowerBound());
- Value *numIterations = ceilDivPositive(builder, loc, diff, forOp.step());
- Value *iterationsPerBlock =
+ ValuePtr numIterations = ceilDivPositive(builder, loc, diff, forOp.step());
+ ValuePtr iterationsPerBlock =
ceilDivPositive(builder, loc, numIterations, sizes[i]);
tileSizes.push_back(iterationsPerBlock);
}
@@ -976,7 +977,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
// Replaces all uses of `orig` with `replacement` except if the user is listed
// in `exceptions`.
static void
-replaceAllUsesExcept(Value *orig, Value *replacement,
+replaceAllUsesExcept(ValuePtr orig, ValuePtr replacement,
const SmallPtrSetImpl<Operation *> &exceptions) {
for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
if (exceptions.count(use.getOwner()) == 0)
@@ -1018,30 +1019,30 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer,
// of the loop to go from 0 to the number of iterations, if necessary.
// TODO(zinenko): introduce support for negative steps or emit dynamic asserts
// on step positivity, whatever gets implemented first.
- Value *diff =
+ ValuePtr diff =
builder.create<SubIOp>(loc, loop.upperBound(), loop.lowerBound());
- Value *numIterations = ceilDivPositive(builder, loc, diff, loop.step());
+ ValuePtr numIterations = ceilDivPositive(builder, loc, diff, loop.step());
loop.setUpperBound(numIterations);
- Value *lb = loop.lowerBound();
+ ValuePtr lb = loop.lowerBound();
if (!isZeroBased) {
- Value *cst0 = builder.create<ConstantIndexOp>(loc, 0);
+ ValuePtr cst0 = builder.create<ConstantIndexOp>(loc, 0);
loop.setLowerBound(cst0);
}
- Value *step = loop.step();
+ ValuePtr step = loop.step();
if (!isStepOne) {
- Value *cst1 = builder.create<ConstantIndexOp>(loc, 1);
+ ValuePtr cst1 = builder.create<ConstantIndexOp>(loc, 1);
loop.setStep(cst1);
}
// Insert code computing the value of the original loop induction variable
// from the "normalized" one.
builder.setInsertionPointToStart(inner.getBody());
- Value *scaled =
+ ValuePtr scaled =
isStepOne ? loop.getInductionVar()
: builder.create<MulIOp>(loc, loop.getInductionVar(), step);
- Value *shifted =
+ ValuePtr shifted =
isZeroBased ? scaled : builder.create<AddIOp>(loc, scaled, lb);
SmallPtrSet<Operation *, 2> preserve{scaled->getDefiningOp(),
@@ -1065,7 +1066,7 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
// of the number of iterations of all loops.
OpBuilder builder(outermost);
Location loc = outermost.getLoc();
- Value *upperBound = outermost.upperBound();
+ ValuePtr upperBound = outermost.upperBound();
for (auto loop : loops.drop_front())
upperBound = builder.create<MulIOp>(loc, upperBound, loop.upperBound());
outermost.setUpperBound(upperBound);
@@ -1080,16 +1081,16 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
// iv_i = floordiv(iv_linear, product-of-loop-ranges-until-i) mod range_i.
// Compute these iteratively from the innermost loop by creating a "running
// quotient" of division by the range.
- Value *previous = outermost.getInductionVar();
+ ValuePtr previous = outermost.getInductionVar();
for (unsigned i = 0, e = loops.size(); i < e; ++i) {
unsigned idx = loops.size() - i - 1;
if (i != 0)
previous = builder.create<SignedDivIOp>(loc, previous,
loops[idx + 1].upperBound());
- Value *iv = (i == e - 1) ? previous
- : builder.create<SignedRemIOp>(
- loc, previous, loops[idx].upperBound());
+ ValuePtr iv = (i == e - 1) ? previous
+ : builder.create<SignedRemIOp>(
+ loc, previous, loops[idx].upperBound());
replaceAllUsesInRegionWith(loops[idx].getInductionVar(), iv,
loops.back().region());
}
@@ -1105,24 +1106,24 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
}
void mlir::mapLoopToProcessorIds(loop::ForOp forOp,
- ArrayRef<Value *> processorId,
- ArrayRef<Value *> numProcessors) {
+ ArrayRef<ValuePtr> processorId,
+ ArrayRef<ValuePtr> numProcessors) {
assert(processorId.size() == numProcessors.size());
if (processorId.empty())
return;
OpBuilder b(forOp);
Location loc(forOp.getLoc());
- Value *mul = processorId.front();
+ ValuePtr mul = processorId.front();
for (unsigned i = 1, e = processorId.size(); i < e; ++i)
mul = b.create<AddIOp>(loc, b.create<MulIOp>(loc, mul, numProcessors[i]),
processorId[i]);
- Value *lb = b.create<AddIOp>(loc, forOp.lowerBound(),
- b.create<MulIOp>(loc, forOp.step(), mul));
+ ValuePtr lb = b.create<AddIOp>(loc, forOp.lowerBound(),
+ b.create<MulIOp>(loc, forOp.step(), mul));
forOp.setLowerBound(lb);
- Value *step = forOp.step();
- for (auto *numProcs : numProcessors)
+ ValuePtr step = forOp.step();
+ for (auto numProcs : numProcessors)
step = b.create<MulIOp>(loc, step, numProcs);
forOp.setStep(step);
}
@@ -1139,7 +1140,7 @@ findHighestBlockForPlacement(const MemRefRegion &region, Block &block,
Block::iterator *copyInPlacementStart,
Block::iterator *copyOutPlacementStart) {
const auto *cst = region.getConstraints();
- SmallVector<Value *, 4> symbols;
+ SmallVector<ValuePtr, 4> symbols;
cst->getIdValues(cst->getNumDimIds(), cst->getNumDimAndSymbolIds(), &symbols);
SmallVector<AffineForOp, 4> enclosingFors;
@@ -1202,10 +1203,10 @@ static void getMultiLevelStrides(const MemRefRegion &region,
/// returns the outermost AffineForOp of the copy loop nest. `memIndicesStart'
/// holds the lower coordinates of the region in the original memref to copy
/// in/out. If `copyOut' is true, generates a copy-out; otherwise a copy-in.
-static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
- Value *fastMemRef,
+static AffineForOp generatePointWiseCopy(Location loc, ValuePtr memref,
+ ValuePtr fastMemRef,
AffineMap memAffineMap,
- ArrayRef<Value *> memIndicesStart,
+ ArrayRef<ValuePtr> memIndicesStart,
ArrayRef<int64_t> fastBufferShape,
bool isCopyOut, OpBuilder b) {
assert(!memIndicesStart.empty() && "only 1-d or more memrefs");
@@ -1215,7 +1216,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
// for y = ...
// fast_buf[x][y] = buf[mem_x + x][mem_y + y]
- SmallVector<Value *, 4> fastBufIndices, memIndices;
+ SmallVector<ValuePtr, 4> fastBufIndices, memIndices;
AffineForOp copyNestRoot;
for (unsigned d = 0, e = fastBufferShape.size(); d < e; ++d) {
auto forOp = b.create<AffineForOp>(loc, 0, fastBufferShape[d]);
@@ -1224,7 +1225,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
b = forOp.getBodyBuilder();
fastBufIndices.push_back(forOp.getInductionVar());
- Value *memBase =
+ ValuePtr memBase =
(memAffineMap == b.getMultiDimIdentityMap(memAffineMap.getNumDims()))
? memIndicesStart[d]
: b.create<AffineApplyOp>(
@@ -1277,7 +1278,7 @@ static LogicalResult generateCopy(
const MemRefRegion &region, Block *block, Block::iterator begin,
Block::iterator end, Block *copyPlacementBlock,
Block::iterator copyInPlacementStart, Block::iterator copyOutPlacementStart,
- AffineCopyOptions copyOptions, DenseMap<Value *, Value *> &fastBufferMap,
+ AffineCopyOptions copyOptions, DenseMap<ValuePtr, ValuePtr> &fastBufferMap,
DenseSet<Operation *> &copyNests, uint64_t *sizeInBytes,
Block::iterator *nBegin, Block::iterator *nEnd) {
*nBegin = begin;
@@ -1285,7 +1286,7 @@ static LogicalResult generateCopy(
FuncOp f = begin->getParentOfType<FuncOp>();
OpBuilder topBuilder(f.getBody());
- Value *zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
+ ValuePtr zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
if (begin == end)
return success();
@@ -1305,7 +1306,7 @@ static LogicalResult generateCopy(
OpBuilder top(func.getBody());
auto loc = region.loc;
- auto *memref = region.memref;
+ auto memref = region.memref;
auto memRefType = memref->getType().cast<MemRefType>();
auto layoutMaps = memRefType.getAffineMaps();
@@ -1317,9 +1318,9 @@ static LogicalResult generateCopy(
// Indices to use for the copying.
// Indices for the original memref being copied from/to.
- SmallVector<Value *, 4> memIndices;
+ SmallVector<ValuePtr, 4> memIndices;
// Indices for the faster buffer being copied into/from.
- SmallVector<Value *, 4> bufIndices;
+ SmallVector<ValuePtr, 4> bufIndices;
unsigned rank = memRefType.getRank();
SmallVector<int64_t, 4> fastBufferShape;
@@ -1345,7 +1346,7 @@ static LogicalResult generateCopy(
// 'regionSymbols' hold values that this memory region is symbolic/parametric
// on; these typically include loop IVs surrounding the level at which the
// copy generation is being done or other valid symbols in MLIR.
- SmallVector<Value *, 8> regionSymbols;
+ SmallVector<ValuePtr, 8> regionSymbols;
cst->getIdValues(rank, cst->getNumIds(), &regionSymbols);
// Construct the index expressions for the fast memory buffer. The index
@@ -1393,7 +1394,7 @@ static LogicalResult generateCopy(
}
// The faster memory space buffer.
- Value *fastMemRef;
+ ValuePtr fastMemRef;
// Check if a buffer was already created.
bool existingBuf = fastBufferMap.count(memref) > 0;
@@ -1433,8 +1434,8 @@ static LogicalResult generateCopy(
return failure();
}
- Value *stride = nullptr;
- Value *numEltPerStride = nullptr;
+ ValuePtr stride = nullptr;
+ ValuePtr numEltPerStride = nullptr;
if (!strideInfos.empty()) {
stride = top.create<ConstantIndexOp>(loc, strideInfos[0].stride);
numEltPerStride =
@@ -1473,7 +1474,7 @@ static LogicalResult generateCopy(
copyOptions.tagMemorySpace);
auto tagMemRef = prologue.create<AllocOp>(loc, tagMemRefType);
- SmallVector<Value *, 4> tagIndices({zeroIndex});
+ SmallVector<ValuePtr, 4> tagIndices({zeroIndex});
auto tagAffineMap = b.getMultiDimIdentityMap(tagIndices.size());
fullyComposeAffineMapAndOperands(&tagAffineMap, &tagIndices);
if (!region.isWrite()) {
@@ -1582,7 +1583,7 @@ static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
SmallVector<AffineForOp, 4> ivs;
getLoopIVs(*opInst, &ivs);
ivs.resize(numParamLoopIVs);
- SmallVector<Value *, 4> symbols;
+ SmallVector<ValuePtr, 4> symbols;
extractForInductionVars(ivs, &symbols);
regionCst->reset(rank, numParamLoopIVs, 0);
regionCst->setIdValues(rank, rank + numParamLoopIVs, symbols);
@@ -1629,12 +1630,12 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
// List of memory regions to copy for. We need a map vector to have a
// guaranteed iteration order to write test cases. CHECK-DAG doesn't help here
  // since the allocs, for example, are identical except for the SSA id.
- SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4> readRegions;
- SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4> writeRegions;
+ SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4> readRegions;
+ SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4> writeRegions;
// Map from original memref's to the fast buffers that their accesses are
// replaced with.
- DenseMap<Value *, Value *> fastBufferMap;
+ DenseMap<ValuePtr, ValuePtr> fastBufferMap;
// To check for errors when walking the block.
bool error = false;
@@ -1684,7 +1685,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
// Attempts to update; returns true if 'region' exists in targetRegions.
auto updateRegion =
- [&](const SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4>
+ [&](const SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4>
&targetRegions) {
auto it = targetRegions.find(region->memref);
if (it == targetRegions.end())
@@ -1736,7 +1737,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
uint64_t totalCopyBuffersSizeInBytes = 0;
bool ret = true;
auto processRegions =
- [&](const SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4>
+ [&](const SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4>
&regions) {
for (const auto &regionEntry : regions) {
// For each region, hoist copy in/out past all hoistable
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index b91b189b381..749d5bf1dd0 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -27,9 +27,9 @@
using namespace mlir;
-void mlir::replaceAllUsesInRegionWith(Value *orig, Value *replacement,
+void mlir::replaceAllUsesInRegionWith(ValuePtr orig, ValuePtr replacement,
Region &region) {
- for (IROperand &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
if (region.isAncestor(use.getOwner()->getParentRegion()))
use.set(replacement);
}
@@ -63,14 +63,14 @@ void mlir::visitUsedValuesDefinedAbove(
}
void mlir::getUsedValuesDefinedAbove(Region &region, Region &limit,
- llvm::SetVector<Value *> &values) {
+ llvm::SetVector<ValuePtr> &values) {
visitUsedValuesDefinedAbove(region, limit, [&](OpOperand *operand) {
values.insert(operand->get());
});
}
void mlir::getUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
- llvm::SetVector<Value *> &values) {
+ llvm::SetVector<ValuePtr> &values) {
for (Region &region : regions)
getUsedValuesDefinedAbove(region, region, values);
}
@@ -146,8 +146,8 @@ namespace {
class LiveMap {
public:
/// Value methods.
- bool wasProvenLive(Value *value) { return liveValues.count(value); }
- void setProvedLive(Value *value) {
+ bool wasProvenLive(ValuePtr value) { return liveValues.count(value); }
+ void setProvedLive(ValuePtr value) {
changed |= liveValues.insert(value).second;
}
@@ -161,7 +161,7 @@ public:
private:
bool changed = false;
- DenseSet<Value *> liveValues;
+ DenseSet<ValuePtr> liveValues;
DenseSet<Operation *> liveOps;
};
} // namespace
@@ -188,7 +188,7 @@ static bool isUseSpeciallyKnownDead(OpOperand &use, LiveMap &liveMap) {
return false;
}
-static void processValue(Value *value, LiveMap &liveMap) {
+static void processValue(ValuePtr value, LiveMap &liveMap) {
bool provedLive = llvm::any_of(value->getUses(), [&](OpOperand &use) {
if (isUseSpeciallyKnownDead(use, liveMap))
return false;
@@ -222,9 +222,9 @@ static void propagateLiveness(Operation *op, LiveMap &liveMap) {
liveMap.setProvedLive(op);
return;
}
- for (Value *value : op->getResults())
+ for (ValuePtr value : op->getResults())
processValue(value, liveMap);
- bool provedLive = llvm::any_of(op->getResults(), [&](Value *value) {
+ bool provedLive = llvm::any_of(op->getResults(), [&](ValuePtr value) {
return liveMap.wasProvenLive(value);
});
if (provedLive)
@@ -240,7 +240,7 @@ static void propagateLiveness(Region &region, LiveMap &liveMap) {
// faster convergence to a fixed point (we try to visit uses before defs).
for (Operation &op : llvm::reverse(block->getOperations()))
propagateLiveness(&op, liveMap);
- for (Value *value : block->getArguments())
+ for (ValuePtr value : block->getArguments())
processValue(value, liveMap);
}
}
@@ -259,7 +259,7 @@ static void eraseTerminatorSuccessorOperands(Operation *terminator,
// Iterating args in reverse is needed for correctness, to avoid
// shifting later args when earlier args are erased.
unsigned arg = argE - argI - 1;
- Value *value = terminator->getSuccessor(succ)->getArgument(arg);
+ ValuePtr value = terminator->getSuccessor(succ)->getArgument(arg);
if (!liveMap.wasProvenLive(value)) {
terminator->eraseSuccessorOperand(succ, arg);
}
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 57a92531163..96a6cdc544f 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -47,7 +47,8 @@ static bool isMemRefDereferencingOp(Operation &op) {
}
/// Return the AffineMapAttr associated with memory 'op' on 'memref'.
-static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) {
+static NamedAttribute getAffineMapAttrForMemRef(Operation *op,
+ ValuePtr memref) {
return TypeSwitch<Operation *, NamedAttribute>(op)
.Case<AffineDmaStartOp, AffineLoadOp, AffinePrefetchOp, AffineStoreOp,
AffineDmaWaitOp>(
@@ -55,12 +56,10 @@ static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) {
}
// Perform the replacement in `op`.
-LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
- Operation *op,
- ArrayRef<Value *> extraIndices,
- AffineMap indexRemap,
- ArrayRef<Value *> extraOperands,
- ArrayRef<Value *> symbolOperands) {
+LogicalResult mlir::replaceAllMemRefUsesWith(
+ ValuePtr oldMemRef, ValuePtr newMemRef, Operation *op,
+ ArrayRef<ValuePtr> extraIndices, AffineMap indexRemap,
+ ArrayRef<ValuePtr> extraOperands, ArrayRef<ValuePtr> symbolOperands) {
unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
@@ -106,13 +105,13 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
NamedAttribute oldMapAttrPair = getAffineMapAttrForMemRef(op, oldMemRef);
AffineMap oldMap = oldMapAttrPair.second.cast<AffineMapAttr>().getValue();
unsigned oldMapNumInputs = oldMap.getNumInputs();
- SmallVector<Value *, 4> oldMapOperands(
+ SmallVector<ValuePtr, 4> oldMapOperands(
op->operand_begin() + memRefOperandPos + 1,
op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
// Apply 'oldMemRefOperands = oldMap(oldMapOperands)'.
- SmallVector<Value *, 4> oldMemRefOperands;
- SmallVector<Value *, 4> affineApplyOps;
+ SmallVector<ValuePtr, 4> oldMemRefOperands;
+ SmallVector<ValuePtr, 4> affineApplyOps;
oldMemRefOperands.reserve(oldMemRefRank);
if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) {
for (auto resultExpr : oldMap.getResults()) {
@@ -130,14 +129,14 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
// Construct new indices as a remap of the old ones if a remapping has been
// provided. The indices of a memref come right after it, i.e.,
// at position memRefOperandPos + 1.
- SmallVector<Value *, 4> remapOperands;
+ SmallVector<ValuePtr, 4> remapOperands;
remapOperands.reserve(extraOperands.size() + oldMemRefRank +
symbolOperands.size());
remapOperands.append(extraOperands.begin(), extraOperands.end());
remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
remapOperands.append(symbolOperands.begin(), symbolOperands.end());
- SmallVector<Value *, 4> remapOutputs;
+ SmallVector<ValuePtr, 4> remapOutputs;
remapOutputs.reserve(oldMemRefRank);
if (indexRemap &&
@@ -156,11 +155,11 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
remapOutputs.append(remapOperands.begin(), remapOperands.end());
}
- SmallVector<Value *, 4> newMapOperands;
+ SmallVector<ValuePtr, 4> newMapOperands;
newMapOperands.reserve(newMemRefRank);
// Prepend 'extraIndices' in 'newMapOperands'.
- for (auto *extraIndex : extraIndices) {
+ for (auto extraIndex : extraIndices) {
assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
@@ -179,7 +178,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
newMap = simplifyAffineMap(newMap);
canonicalizeMapAndOperands(&newMap, &newMapOperands);
// Remove any affine.apply's that became dead as a result of composition.
- for (auto *value : affineApplyOps)
+ for (auto value : affineApplyOps)
if (value->use_empty())
value->getDefiningOp()->erase();
@@ -203,7 +202,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
// Result types don't change. Both memref's are of the same elemental type.
state.types.reserve(op->getNumResults());
- for (auto *result : op->getResults())
+ for (auto result : op->getResults())
state.types.push_back(result->getType());
// Add attribute for 'newMap', other Attributes do not change.
@@ -224,13 +223,11 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
return success();
}
-LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
- ArrayRef<Value *> extraIndices,
- AffineMap indexRemap,
- ArrayRef<Value *> extraOperands,
- ArrayRef<Value *> symbolOperands,
- Operation *domInstFilter,
- Operation *postDomInstFilter) {
+LogicalResult mlir::replaceAllMemRefUsesWith(
+ ValuePtr oldMemRef, ValuePtr newMemRef, ArrayRef<ValuePtr> extraIndices,
+ AffineMap indexRemap, ArrayRef<ValuePtr> extraOperands,
+ ArrayRef<ValuePtr> symbolOperands, Operation *domInstFilter,
+ Operation *postDomInstFilter) {
unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
@@ -331,9 +328,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
void mlir::createAffineComputationSlice(
Operation *opInst, SmallVectorImpl<AffineApplyOp> *sliceOps) {
// Collect all operands that are results of affine apply ops.
- SmallVector<Value *, 4> subOperands;
+ SmallVector<ValuePtr, 4> subOperands;
subOperands.reserve(opInst->getNumOperands());
- for (auto *operand : opInst->getOperands())
+ for (auto operand : opInst->getOperands())
if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp()))
subOperands.push_back(operand);
@@ -348,7 +345,7 @@ void mlir::createAffineComputationSlice(
// which case there would be nothing to do.
bool localized = true;
for (auto *op : affineApplyOps) {
- for (auto *result : op->getResults()) {
+ for (auto result : op->getResults()) {
for (auto *user : result->getUsers()) {
if (user != opInst) {
localized = false;
@@ -361,7 +358,7 @@ void mlir::createAffineComputationSlice(
return;
OpBuilder builder(opInst);
- SmallVector<Value *, 4> composedOpOperands(subOperands);
+ SmallVector<ValuePtr, 4> composedOpOperands(subOperands);
auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size());
fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands);
@@ -378,7 +375,7 @@ void mlir::createAffineComputationSlice(
// affine apply op above instead of existing ones (subOperands). So, they
// differ from opInst's operands only for those operands in 'subOperands', for
// which they will be replaced by the corresponding one from 'sliceOps'.
- SmallVector<Value *, 4> newOperands(opInst->getOperands());
+ SmallVector<ValuePtr, 4> newOperands(opInst->getOperands());
for (unsigned i = 0, e = newOperands.size(); i < e; i++) {
// Replace the subOperands from among the new operands.
unsigned j, f;
@@ -451,8 +448,8 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
newShape[d] = ubConst.getValue() + 1;
}
- auto *oldMemRef = allocOp.getResult();
- SmallVector<Value *, 4> symbolOperands(allocOp.getSymbolicOperands());
+ auto oldMemRef = allocOp.getResult();
+ SmallVector<ValuePtr, 4> symbolOperands(allocOp.getSymbolicOperands());
auto newMemRefType = MemRefType::get(newShape, memrefType.getElementType(),
b.getMultiDimIdentityMap(newRank));
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index e3212d54e42..d8f5b1dc0e4 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -705,7 +705,7 @@ struct VectorizationState {
// Map of old scalar Operation to new vectorized Operation.
DenseMap<Operation *, Operation *> vectorizationMap;
// Map of old scalar Value to new vectorized Value.
- DenseMap<Value *, Value *> replacementMap;
+ DenseMap<ValuePtr, ValuePtr> replacementMap;
// The strategy drives which loop to vectorize by which amount.
const VectorizationStrategy *strategy;
// Use-def roots. These represent the starting points for the worklist in the
@@ -728,7 +728,7 @@ struct VectorizationState {
OperationFolder *folder;
private:
- void registerReplacement(Value *key, Value *value);
+ void registerReplacement(ValuePtr key, ValuePtr value);
};
} // end namespace
@@ -768,7 +768,7 @@ void VectorizationState::finishVectorizationPattern() {
}
}
-void VectorizationState::registerReplacement(Value *key, Value *value) {
+void VectorizationState::registerReplacement(ValuePtr key, ValuePtr value) {
assert(replacementMap.count(key) == 0 && "replacement already registered");
replacementMap.insert(std::make_pair(key, value));
}
@@ -776,7 +776,7 @@ void VectorizationState::registerReplacement(Value *key, Value *value) {
// Apply 'map' with 'mapOperands' returning resulting values in 'results'.
static void computeMemoryOpIndices(Operation *op, AffineMap map,
ValueRange mapOperands,
- SmallVectorImpl<Value *> &results) {
+ SmallVectorImpl<ValuePtr> &results) {
OpBuilder builder(op);
for (auto resultExpr : map.getResults()) {
auto singleResMap =
@@ -803,7 +803,7 @@ static void computeMemoryOpIndices(Operation *op, AffineMap map,
/// Such special cases force us to delay the vectorization of the stores until
/// the last step. Here we merely register the store operation.
template <typename LoadOrStoreOpPointer>
-static LogicalResult vectorizeRootOrTerminal(Value *iv,
+static LogicalResult vectorizeRootOrTerminal(ValuePtr iv,
LoadOrStoreOpPointer memoryOp,
VectorizationState *state) {
auto memRefType = memoryOp.getMemRef()->getType().template cast<MemRefType>();
@@ -823,7 +823,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
if (auto load = dyn_cast<AffineLoadOp>(opInst)) {
OpBuilder b(opInst);
ValueRange mapOperands = load.getMapOperands();
- SmallVector<Value *, 8> indices;
+ SmallVector<ValuePtr, 8> indices;
indices.reserve(load.getMemRefType().getRank());
if (load.getAffineMap() !=
b.getMultiDimIdentityMap(load.getMemRefType().getRank())) {
@@ -838,8 +838,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
LLVM_DEBUG(permutationMap.print(dbgs()));
auto transfer = b.create<vector::TransferReadOp>(
- opInst->getLoc(), vectorType, memoryOp.getMemRef(),
- map(makePtrDynCaster<Value>(), indices),
+ opInst->getLoc(), vectorType, memoryOp.getMemRef(), indices,
AffineMapAttr::get(permutationMap),
// TODO(b/144455320) add a proper padding value, not just 0.0 : f32
state->folder->create<ConstantFloatOp>(b, opInst->getLoc(),
@@ -951,7 +950,8 @@ vectorizeLoopsAndLoadsRecursively(NestedMatch oneMatch,
/// element type.
/// If `type` is not a valid vector type or if the scalar constant is not a
/// valid vector element type, returns nullptr.
-static Value *vectorizeConstant(Operation *op, ConstantOp constant, Type type) {
+static ValuePtr vectorizeConstant(Operation *op, ConstantOp constant,
+ Type type) {
if (!type || !type.isa<VectorType>() ||
!VectorType::isValidElementType(constant.getType())) {
return nullptr;
@@ -989,8 +989,8 @@ static Value *vectorizeConstant(Operation *op, ConstantOp constant, Type type) {
/// vectorization is possible with the above logic. Returns nullptr otherwise.
///
/// TODO(ntv): handle more complex cases.
-static Value *vectorizeOperand(Value *operand, Operation *op,
- VectorizationState *state) {
+static ValuePtr vectorizeOperand(ValuePtr operand, Operation *op,
+ VectorizationState *state) {
LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: ");
LLVM_DEBUG(operand->print(dbgs()));
// 1. If this value has already been vectorized this round, we are done.
@@ -1004,7 +1004,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
// been vectorized. This would be invalid IR.
auto it = state->replacementMap.find(operand);
if (it != state->replacementMap.end()) {
- auto *res = it->second;
+ auto res = it->second;
LLVM_DEBUG(dbgs() << "-> delayed replacement by: ");
LLVM_DEBUG(res->print(dbgs()));
return res;
@@ -1047,12 +1047,12 @@ static Operation *vectorizeOneOperation(Operation *opInst,
if (auto store = dyn_cast<AffineStoreOp>(opInst)) {
OpBuilder b(opInst);
- auto *memRef = store.getMemRef();
- auto *value = store.getValueToStore();
- auto *vectorValue = vectorizeOperand(value, opInst, state);
+ auto memRef = store.getMemRef();
+ auto value = store.getValueToStore();
+ auto vectorValue = vectorizeOperand(value, opInst, state);
ValueRange mapOperands = store.getMapOperands();
- SmallVector<Value *, 8> indices;
+ SmallVector<ValuePtr, 8> indices;
indices.reserve(store.getMemRefType().getRank());
if (store.getAffineMap() !=
b.getMultiDimIdentityMap(store.getMemRefType().getRank())) {
@@ -1081,16 +1081,16 @@ static Operation *vectorizeOneOperation(Operation *opInst,
return nullptr;
SmallVector<Type, 8> vectorTypes;
- for (auto *v : opInst->getResults()) {
+ for (auto v : opInst->getResults()) {
vectorTypes.push_back(
VectorType::get(state->strategy->vectorSizes, v->getType()));
}
- SmallVector<Value *, 8> vectorOperands;
- for (auto *v : opInst->getOperands()) {
+ SmallVector<ValuePtr, 8> vectorOperands;
+ for (auto v : opInst->getOperands()) {
vectorOperands.push_back(vectorizeOperand(v, opInst, state));
}
// Check whether a single operand is null. If so, vectorization failed.
- bool success = llvm::all_of(vectorOperands, [](Value *op) { return op; });
+ bool success = llvm::all_of(vectorOperands, [](ValuePtr op) { return op; });
if (!success) {
LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize");
return nullptr;
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 0b105eadf5a..376fc249a18 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -484,7 +484,7 @@ TEST_FUNC(select_op_i32) {
IndexedValue A(f.getArgument(0));
IndexHandle i, j;
AffineLoopNestBuilder({&i, &j}, {zero, zero}, {one, one}, {1, 1})([&]{
- // This test exercises IndexedValue::operator Value*.
+ // This test exercises IndexedValue::operator Value.
// Without it, one must force conversion to ValueHandle as such:
// edsc::intrinsics::select(
// i == zero, ValueHandle(A(zero, zero)), ValueHandle(ValueA(i, j)))
@@ -802,7 +802,7 @@ TEST_FUNC(affine_if_op) {
};
auto intSet = IntegerSet::get(2, 2, affineExprs, isEq);
- SmallVector<Value *, 4> affineIfArgs = {zero, zero, ten, ten};
+ SmallVector<ValuePtr, 4> affineIfArgs = {zero, zero, ten, ten};
intrinsics::affine_if(intSet, affineIfArgs, /*withElseRegion=*/false);
intrinsics::affine_if(intSet, affineIfArgs, /*withElseRegion=*/true);
diff --git a/mlir/test/lib/TestDialect/TestDialect.cpp b/mlir/test/lib/TestDialect/TestDialect.cpp
index 7462db4544f..12d024f6593 100644
--- a/mlir/test/lib/TestDialect/TestDialect.cpp
+++ b/mlir/test/lib/TestDialect/TestDialect.cpp
@@ -100,7 +100,7 @@ struct TestInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only handle "test.return" here.
auto returnOp = dyn_cast<TestReturnOp>(op);
if (!returnOp)
@@ -117,7 +117,7 @@ struct TestInlinerInterface : public DialectInlinerInterface {
/// operation that takes 'input' as the only operand, and produces a single
/// result of 'resultType'. If a conversion can not be generated, nullptr
/// should be returned.
- Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+ Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
Type resultType,
Location conversionLoc) const final {
// Only allow conversion for i16/i32 types.
@@ -231,7 +231,7 @@ static ParseResult parseWrappingRegionOp(OpAsmParser &parser,
// Create a return terminator in the inner region, pass as operand to the
// terminator the returned values from the wrapped operation.
- SmallVector<Value *, 8> return_operands(wrapped_op->getResults());
+ SmallVector<ValuePtr, 8> return_operands(wrapped_op->getResults());
OpBuilder builder(parser.getBuilder().getContext());
builder.setInsertionPointToEnd(&block);
builder.create<TestReturnOp>(wrapped_op->getLoc(), return_operands);
@@ -297,7 +297,7 @@ OpFoldResult TestOpWithRegionFold::fold(ArrayRef<Attribute> operands) {
LogicalResult TestOpWithVariadicResultsAndFolder::fold(
ArrayRef<Attribute> operands, SmallVectorImpl<OpFoldResult> &results) {
- for (Value *input : this->operands()) {
+ for (ValuePtr input : this->operands()) {
results.push_back(input);
}
return success();
diff --git a/mlir/test/lib/TestDialect/TestOps.td b/mlir/test/lib/TestDialect/TestOps.td
index e33d9c26c7f..ea071f0ddf4 100644
--- a/mlir/test/lib/TestDialect/TestOps.td
+++ b/mlir/test/lib/TestDialect/TestOps.td
@@ -644,7 +644,7 @@ def OpSymbolBindingB : TEST_Op<"symbol_binding_b", []> {
let builders = [
OpBuilder<
- "Builder *builder, OperationState &state, Value *operand",
+ "Builder *builder, OperationState &state, ValuePtr operand",
[{
state.types.assign({builder->getIntegerType(32)});
state.addOperands({operand});
diff --git a/mlir/test/lib/TestDialect/TestPatterns.cpp b/mlir/test/lib/TestDialect/TestPatterns.cpp
index 94eb792cc66..1f6224dba3a 100644
--- a/mlir/test/lib/TestDialect/TestPatterns.cpp
+++ b/mlir/test/lib/TestDialect/TestPatterns.cpp
@@ -22,11 +22,12 @@
using namespace mlir;
// Native function for testing NativeCodeCall
-static Value *chooseOperand(Value *input1, Value *input2, BoolAttr choice) {
+static ValuePtr chooseOperand(ValuePtr input1, ValuePtr input2,
+ BoolAttr choice) {
return choice.getValue() ? input1 : input2;
}
-static void createOpI(PatternRewriter &rewriter, Value *input) {
+static void createOpI(PatternRewriter &rewriter, ValuePtr input) {
rewriter.create<OpI>(rewriter.getUnknownLoc(), input);
}
@@ -73,7 +74,7 @@ struct ReturnTypeOpMatch : public RewritePattern {
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const final {
if (auto retTypeFn = dyn_cast<InferTypeOpInterface>(op)) {
- SmallVector<Value *, 4> values(op->getOperands());
+ SmallVector<ValuePtr, 4> values(op->getOperands());
SmallVector<Type, 2> inferedReturnTypes;
if (failed(retTypeFn.inferReturnTypes(op->getLoc(), values,
op->getAttrs(), op->getRegions(),
@@ -132,7 +133,7 @@ struct TestRegionRewriteBlockMovement : public ConversionPattern {
: ConversionPattern("test.region", 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
// Inline this region into the parent region.
auto &parentRegion = *op->getParentRegion();
@@ -165,7 +166,7 @@ struct TestRegionRewriteUndo : public RewritePattern {
// Add an explicitly illegal operation to ensure the conversion fails.
rewriter.create<ILLegalOpF>(op->getLoc(), rewriter.getIntegerType(32));
- rewriter.create<TestValidOp>(op->getLoc(), ArrayRef<Value *>());
+ rewriter.create<TestValidOp>(op->getLoc(), ArrayRef<ValuePtr>());
// Drop this operation.
rewriter.eraseOp(op);
@@ -182,7 +183,7 @@ struct TestDropOpSignatureConversion : public ConversionPattern {
: ConversionPattern("test.drop_region_op", 1, ctx), converter(converter) {
}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
Region &region = op->getRegion(0);
Block *entry = &region.front();
@@ -208,7 +209,7 @@ struct TestPassthroughInvalidOp : public ConversionPattern {
TestPassthroughInvalidOp(MLIRContext *ctx)
: ConversionPattern("test.invalid", 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
rewriter.replaceOpWithNewOp<TestValidOp>(op, llvm::None, operands,
llvm::None);
@@ -220,7 +221,7 @@ struct TestSplitReturnType : public ConversionPattern {
TestSplitReturnType(MLIRContext *ctx)
: ConversionPattern("test.return", 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
// Check for a return of F32.
if (op->getNumOperands() != 1 || !op->getOperand(0)->getType().isF32())
@@ -245,7 +246,7 @@ struct TestChangeProducerTypeI32ToF32 : public ConversionPattern {
TestChangeProducerTypeI32ToF32(MLIRContext *ctx)
: ConversionPattern("test.type_producer", 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
// If the type is I32, change the type to F32.
if (!(*op->result_type_begin()).isInteger(32))
@@ -258,7 +259,7 @@ struct TestChangeProducerTypeF32ToF64 : public ConversionPattern {
TestChangeProducerTypeF32ToF64(MLIRContext *ctx)
: ConversionPattern("test.type_producer", 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
// If the type is F32, change the type to F64.
if (!(*op->result_type_begin()).isF32())
@@ -271,7 +272,7 @@ struct TestChangeProducerTypeF32ToInvalid : public ConversionPattern {
TestChangeProducerTypeF32ToInvalid(MLIRContext *ctx)
: ConversionPattern("test.type_producer", 10, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
// Always convert to B16, even though it is not a legal type. This tests
// that values are unmapped correctly.
@@ -283,7 +284,7 @@ struct TestUpdateConsumerType : public ConversionPattern {
TestUpdateConsumerType(MLIRContext *ctx)
: ConversionPattern("test.type_consumer", 1, ctx) {}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
// Verify that the incoming operand has been successfully remapped to F64.
if (!operands[0]->getType().isF64())
@@ -344,7 +345,7 @@ struct TestTypeConverter : public TypeConverter {
/// Override the hook to materialize a conversion. This is necessary because
/// we generate 1->N type mappings.
Operation *materializeConversion(PatternRewriter &rewriter, Type resultType,
- ArrayRef<Value *> inputs,
+ ArrayRef<ValuePtr> inputs,
Location loc) override {
return rewriter.create<TestCastOp>(loc, resultType, inputs);
}
@@ -467,13 +468,13 @@ struct OneVResOneVOperandOp1Converter
using OpConversionPattern<OneVResOneVOperandOp1>::OpConversionPattern;
PatternMatchResult
- matchAndRewrite(OneVResOneVOperandOp1 op, ArrayRef<Value *> operands,
+ matchAndRewrite(OneVResOneVOperandOp1 op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const override {
auto origOps = op.getOperands();
assert(std::distance(origOps.begin(), origOps.end()) == 1 &&
"One operand expected");
- Value *origOp = *origOps.begin();
- SmallVector<Value *, 2> remappedOperands;
+ ValuePtr origOp = *origOps.begin();
+ SmallVector<ValuePtr, 2> remappedOperands;
    // Replicate the remapped original operand twice. Note that we don't use
// the remapped 'operand' since the goal is testing 'getRemappedValue'.
remappedOperands.push_back(rewriter.getRemappedValue(origOp));
diff --git a/mlir/test/lib/Transforms/TestLoopMapping.cpp b/mlir/test/lib/Transforms/TestLoopMapping.cpp
index c25fea9aa13..7f587fc3170 100644
--- a/mlir/test/lib/Transforms/TestLoopMapping.cpp
+++ b/mlir/test/lib/Transforms/TestLoopMapping.cpp
@@ -41,7 +41,7 @@ public:
// SSA values for the transformation are created out of thin air by
// unregistered "new_processor_id_and_range" operations. This is enough to
// emulate mapping conditions.
- SmallVector<Value *, 8> processorIds, numProcessors;
+ SmallVector<ValuePtr, 8> processorIds, numProcessors;
func.walk([&processorIds, &numProcessors](Operation *op) {
if (op->getName().getStringRef() != "new_processor_id_and_range")
return;
diff --git a/mlir/test/lib/Transforms/TestVectorizationUtils.cpp b/mlir/test/lib/Transforms/TestVectorizationUtils.cpp
index 7efc74f2304..35df0631ca7 100644
--- a/mlir/test/lib/Transforms/TestVectorizationUtils.cpp
+++ b/mlir/test/lib/Transforms/TestVectorizationUtils.cpp
@@ -245,7 +245,7 @@ void VectorizerTestPass::testNormalizeMaps() {
for (auto m : matches) {
auto app = cast<AffineApplyOp>(m.getMatchedOperation());
OpBuilder b(m.getMatchedOperation());
- SmallVector<Value *, 8> operands(app.getOperands());
+ SmallVector<ValuePtr, 8> operands(app.getOperands());
makeComposedAffineApply(b, app.getLoc(), app.getAffineMap(), operands);
}
}
diff --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td
index fa73697dba8..004e7662299 100644
--- a/mlir/test/mlir-tblgen/op-attribute.td
+++ b/mlir/test/mlir-tblgen/op-attribute.td
@@ -216,9 +216,9 @@ def MixOperandsAndAttrs : NS_Op<"mix_operands_and_attrs", []> {
}
// DEF-LABEL: MixOperandsAndAttrs definitions
-// DEF-DAG: Value *MixOperandsAndAttrs::operand()
-// DEF-DAG: Value *MixOperandsAndAttrs::otherArg()
-// DEF-DAG: void MixOperandsAndAttrs::build(Builder *tblgen_builder, OperationState &tblgen_state, FloatAttr attr, Value *operand, FloatAttr otherAttr, Value *otherArg)
+// DEF-DAG: ValuePtr MixOperandsAndAttrs::operand()
+// DEF-DAG: ValuePtr MixOperandsAndAttrs::otherArg()
+// DEF-DAG: void MixOperandsAndAttrs::build(Builder *tblgen_builder, OperationState &tblgen_state, FloatAttr attr, ValuePtr operand, FloatAttr otherAttr, ValuePtr otherArg)
// DEF-DAG: APFloat MixOperandsAndAttrs::attr()
// DEF-DAG: APFloat MixOperandsAndAttrs::otherAttr()
diff --git a/mlir/test/mlir-tblgen/op-decl.td b/mlir/test/mlir-tblgen/op-decl.td
index a217a139848..55952236429 100644
--- a/mlir/test/mlir-tblgen/op-decl.td
+++ b/mlir/test/mlir-tblgen/op-decl.td
@@ -26,7 +26,7 @@ def NS_AOp : NS_Op<"a_op", [NoSideEffect, NoSideEffect]> {
);
let regions = (region AnyRegion:$someRegion);
- let builders = [OpBuilder<"Value *val">];
+ let builders = [OpBuilder<"ValuePtr val">];
let parser = [{ foo }];
let printer = [{ bar }];
let verifier = [{ baz }];
@@ -46,12 +46,12 @@ def NS_AOp : NS_Op<"a_op", [NoSideEffect, NoSideEffect]> {
// CHECK: class AOpOperandAdaptor {
// CHECK: public:
-// CHECK: AOpOperandAdaptor(ArrayRef<Value *> values);
-// CHECK: ArrayRef<Value *> getODSOperands(unsigned index);
-// CHECK: Value *a();
-// CHECK: ArrayRef<Value *> b();
+// CHECK: AOpOperandAdaptor(ArrayRef<ValuePtr> values);
+// CHECK: ArrayRef<ValuePtr> getODSOperands(unsigned index);
+// CHECK: ValuePtr a();
+// CHECK: ArrayRef<ValuePtr> b();
// CHECK: private:
-// CHECK: ArrayRef<Value *> tblgen_operands;
+// CHECK: ArrayRef<ValuePtr> tblgen_operands;
// CHECK: };
// CHECK: class AOp : public Op<AOp, OpTrait::AtLeastNResults<1>::Impl, OpTrait::HasNoSideEffect, OpTrait::AtLeastNOperands<1>::Impl
@@ -60,18 +60,18 @@ def NS_AOp : NS_Op<"a_op", [NoSideEffect, NoSideEffect]> {
// CHECK: using OperandAdaptor = AOpOperandAdaptor;
// CHECK: static StringRef getOperationName();
// CHECK: Operation::operand_range getODSOperands(unsigned index);
-// CHECK: Value *a();
+// CHECK: ValuePtr a();
// CHECK: Operation::operand_range b();
// CHECK: Operation::result_range getODSResults(unsigned index);
-// CHECK: Value *r();
+// CHECK: ValuePtr r();
// CHECK: Region &someRegion();
// CHECK: IntegerAttr attr1Attr()
// CHECK: APInt attr1();
// CHECK: FloatAttr attr2Attr()
// CHECK: Optional< APFloat > attr2();
-// CHECK: static void build(Value *val);
-// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef<Type> s, Value *a, ValueRange b, IntegerAttr attr1, /*optional*/FloatAttr attr2)
-// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef<Type> s, Value *a, ValueRange b, APInt attr1, /*optional*/FloatAttr attr2)
+// CHECK: static void build(ValuePtr val);
+// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef<Type> s, ValuePtr a, ValueRange b, IntegerAttr attr1, /*optional*/FloatAttr attr2)
+// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef<Type> s, ValuePtr a, ValueRange b, APInt attr1, /*optional*/FloatAttr attr2)
// CHECK: static void build(Builder *, OperationState &tblgen_state, ArrayRef<Type> resultTypes, ValueRange operands, ArrayRef<NamedAttribute> attributes)
// CHECK: static ParseResult parse(OpAsmParser &parser, OperationState &result);
// CHECK: void print(OpAsmPrinter &p);
@@ -111,7 +111,7 @@ def NS_DOp : NS_Op<"op_with_two_operands", []> {
def NS_SkipDefaultBuildersOp : NS_Op<"skip_default_builders", []> {
let skipDefaultBuilders = 1;
- let builders = [OpBuilder<"Value *val">];
+ let builders = [OpBuilder<"ValuePtr val">];
}
// CHECK-LABEL: NS::SkipDefaultBuildersOp declarations
diff --git a/mlir/test/mlir-tblgen/op-operand.td b/mlir/test/mlir-tblgen/op-operand.td
index 872cc474a06..c592686ebd3 100644
--- a/mlir/test/mlir-tblgen/op-operand.td
+++ b/mlir/test/mlir-tblgen/op-operand.td
@@ -18,7 +18,7 @@ def OpA : NS_Op<"one_normal_operand_op", []> {
// CHECK-NEXT: tblgen_operands = values
// CHECK: void OpA::build
-// CHECK: Value *input
+// CHECK: ValuePtr input
// CHECK: tblgen_state.addOperands(input);
// CHECK: void OpA::build
@@ -39,19 +39,19 @@ def OpD : NS_Op<"mix_variadic_and_normal_inputs_op", [SameVariadicOperandSize]>
let arguments = (ins Variadic<AnyTensor>:$input1, AnyTensor:$input2, Variadic<AnyTensor>:$input3);
}
-// CHECK-LABEL: ArrayRef<Value *> OpDOperandAdaptor::input1
+// CHECK-LABEL: ArrayRef<ValuePtr> OpDOperandAdaptor::input1
// CHECK-NEXT: return getODSOperands(0);
-// CHECK-LABEL: Value *OpDOperandAdaptor::input2
+// CHECK-LABEL: ValuePtr OpDOperandAdaptor::input2
// CHECK-NEXT: return *getODSOperands(1).begin();
-// CHECK-LABEL: ArrayRef<Value *> OpDOperandAdaptor::input3
+// CHECK-LABEL: ArrayRef<ValuePtr> OpDOperandAdaptor::input3
// CHECK-NEXT: return getODSOperands(2);
// CHECK-LABEL: Operation::operand_range OpD::input1
// CHECK-NEXT: return getODSOperands(0);
-// CHECK-LABEL: Value *OpD::input2
+// CHECK-LABEL: ValuePtr OpD::input2
// CHECK-NEXT: return *getODSOperands(1).begin();
// CHECK-LABEL: OpD::build
diff --git a/mlir/test/mlir-tblgen/op-result.td b/mlir/test/mlir-tblgen/op-result.td
index 4ee631986cc..f9a77ea492e 100644
--- a/mlir/test/mlir-tblgen/op-result.td
+++ b/mlir/test/mlir-tblgen/op-result.td
@@ -23,9 +23,9 @@ def OpB : NS_Op<"same_input_output_type_op", [SameOperandsAndResultType]> {
}
// CHECK-LABEL: OpB definitions
-// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Type y, Value *x)
+// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Type y, ValuePtr x)
// CHECK: tblgen_state.addTypes(y);
-// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Value *x)
+// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, ValuePtr x)
// CHECK: tblgen_state.addTypes({x->getType()});
def OpC : NS_Op<"three_normal_result_op", []> {
@@ -89,7 +89,7 @@ def OpI : NS_Op<"mix_variadic_and_normal_results_op", [SameVariadicResultSize]>
// CHECK-LABEL: Operation::result_range OpI::output1
// CHECK-NEXT: return getODSResults(0);
-// CHECK-LABEL: Value *OpI::output2
+// CHECK-LABEL: ValuePtr OpI::output2
// CHECK-NEXT: return *getODSResults(1).begin();
// CHECK-LABEL: OpI::build
diff --git a/mlir/test/mlir-tblgen/predicate.td b/mlir/test/mlir-tblgen/predicate.td
index 26a5b746fb4..fef1b139dc9 100644
--- a/mlir/test/mlir-tblgen/predicate.td
+++ b/mlir/test/mlir-tblgen/predicate.td
@@ -16,7 +16,7 @@ def OpA : NS_Op<"op_for_CPred_containing_multiple_same_placeholder", []> {
}
// CHECK-LABEL: OpA::verify
-// CHECK: for (Value *v : getODSOperands(0)) {
+// CHECK: for (ValuePtr v : getODSOperands(0)) {
// CHECK: if (!((v->getType().isInteger(32) || v->getType().isF32())))
def OpB : NS_Op<"op_for_And_PredOpTrait", [
@@ -90,5 +90,5 @@ def OpK : NS_Op<"op_for_AnyTensorOf", []> {
}
// CHECK-LABEL: OpK::verify
-// CHECK: for (Value *v : getODSOperands(0)) {
+// CHECK: for (ValuePtr v : getODSOperands(0)) {
// CHECK: if (!(((v->getType().isa<TensorType>())) && (((v->getType().cast<ShapedType>().getElementType().isF32())) || ((v->getType().cast<ShapedType>().getElementType().isInteger(32))))))
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index dd56458ccb3..df8feb855c5 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -713,11 +713,12 @@ void OpEmitter::genAttrGetters() {
// Generates the named operand getter methods for the given Operator `op` and
// puts them in `opClass`. Uses `rangeType` as the return type of getters that
-// return a range of operands (individual operands are `Value *` and each
-// element in the range must also be `Value *`); use `rangeBeginCall` to get an
-// iterator to the beginning of the operand range; use `rangeSizeCall` to obtain
-// the number of operands. `getOperandCallPattern` contains the code necessary
-// to obtain a single operand whose position will be substituted instead of
+// return a range of operands (individual operands are `ValuePtr` and each
+// element in the range must also be `ValuePtr`); use `rangeBeginCall` to get
+// an iterator to the beginning of the operand range; use `rangeSizeCall` to
+// obtain the number of operands. `getOperandCallPattern` contains the code
+// necessary to obtain a single operand whose position will be substituted
+// instead of the
// "{0}" marker in the pattern. Note that the pattern should work for any kind
// of ops, in particular for one-operand ops that may not have the
// `getOperand(unsigned)` method.
@@ -790,7 +791,7 @@ static void generateNamedOperandGetters(const Operator &op, Class &opClass,
auto &m = opClass.newMethod(rangeType, operand.name);
m.body() << " return getODSOperands(" << i << ");";
} else {
- auto &m = opClass.newMethod("Value *", operand.name);
+ auto &m = opClass.newMethod("ValuePtr ", operand.name);
m.body() << " return *getODSOperands(" << i << ").begin();";
}
}
@@ -868,7 +869,7 @@ void OpEmitter::genNamedResultGetters() {
auto &m = opClass.newMethod("Operation::result_range", result.name);
m.body() << " return getODSResults(" << i << ");";
} else {
- auto &m = opClass.newMethod("Value *", result.name);
+ auto &m = opClass.newMethod("ValuePtr ", result.name);
m.body() << " return *getODSResults(" << i << ").begin();";
}
}
@@ -1246,7 +1247,7 @@ void OpEmitter::buildParamList(std::string &paramList,
auto argument = op.getArg(i);
if (argument.is<tblgen::NamedTypeConstraint *>()) {
const auto &operand = op.getOperand(numOperands);
- paramList.append(operand.isVariadic() ? ", ValueRange " : ", Value *");
+ paramList.append(operand.isVariadic() ? ", ValueRange " : ", ValuePtr ");
paramList.append(getArgumentName(op, numOperands));
++numOperands;
} else {
@@ -1535,7 +1536,7 @@ void OpEmitter::genOperandResultVerifier(OpMethodBody &body,
continue;
// Emit a loop to check all the dynamic values in the pack.
- body << formatv(" for (Value *v : getODS{0}{1}s({2})) {{\n",
+ body << formatv(" for (ValuePtr v : getODS{0}{1}s({2})) {{\n",
// Capitalize the first letter to match the function name
valueKind.substr(0, 1).upper(), valueKind.substr(1),
staticValue.index());
@@ -1690,7 +1691,7 @@ void OpEmitter::genOpAsmInterface() {
namespace {
// Helper class to emit Op operand adaptors to an output stream. Operand
-// adaptors are wrappers around ArrayRef<Value *> that provide named operand
+// adaptors are wrappers around ArrayRef<ValuePtr> that provide named operand
// getters identical to those defined in the Op.
class OpOperandAdaptorEmitter {
public:
@@ -1706,12 +1707,12 @@ private:
OpOperandAdaptorEmitter::OpOperandAdaptorEmitter(const Operator &op)
: adapterClass(op.getCppClassName().str() + "OperandAdaptor") {
- adapterClass.newField("ArrayRef<Value *>", "tblgen_operands");
- auto &constructor = adapterClass.newConstructor("ArrayRef<Value *> values");
+ adapterClass.newField("ArrayRef<ValuePtr>", "tblgen_operands");
+ auto &constructor = adapterClass.newConstructor("ArrayRef<ValuePtr> values");
constructor.body() << " tblgen_operands = values;\n";
generateNamedOperandGetters(op, adapterClass,
- /*rangeType=*/"ArrayRef<Value *>",
+ /*rangeType=*/"ArrayRef<ValuePtr>",
/*rangeBeginCall=*/"tblgen_operands.begin()",
/*rangeSizeCall=*/"tblgen_operands.size()",
/*getOperandCallPattern=*/"tblgen_operands[{0}]");
diff --git a/mlir/tools/mlir-tblgen/RewriterGen.cpp b/mlir/tools/mlir-tblgen/RewriterGen.cpp
index b2376e8739c..a74bc23a95a 100644
--- a/mlir/tools/mlir-tblgen/RewriterGen.cpp
+++ b/mlir/tools/mlir-tblgen/RewriterGen.cpp
@@ -576,14 +576,14 @@ void PatternEmitter::emitRewriteLogic() {
os.indent(4) << "rewriter.eraseOp(op0);\n";
} else {
// Process replacement result patterns.
- os.indent(4) << "SmallVector<Value *, 4> tblgen_repl_values;\n";
+ os.indent(4) << "SmallVector<ValuePtr, 4> tblgen_repl_values;\n";
for (int i = replStartIndex; i < numResultPatterns; ++i) {
DagNode resultTree = pattern.getResultPattern(i);
auto val = handleResultPattern(resultTree, offsets[i], 0);
os.indent(4) << "\n";
// Resolve each symbol for all range use so that we can loop over them.
os << symbolInfoMap.getAllRangeUse(
- val, " for (auto *v : {0}) {{ tblgen_repl_values.push_back(v); }",
+ val, " for (auto v : {0}) {{ tblgen_repl_values.push_back(v); }",
"\n");
}
os.indent(4) << "\n";
@@ -819,7 +819,7 @@ std::string PatternEmitter::handleOpCreation(DagNode tree, int resultIndex,
int numResults = resultOp.getNumResults();
if (numResults != 0) {
for (int i = 0; i < numResults; ++i)
- os.indent(6) << formatv("for (auto *v : castedOp0.getODSResults({0})) {{"
+ os.indent(6) << formatv("for (auto v : castedOp0.getODSResults({0})) {{"
"tblgen_types.push_back(v->getType()); }\n",
resultIndex + i);
}
@@ -835,8 +835,8 @@ void PatternEmitter::createSeparateLocalVarsForOpArgs(
Operator &resultOp = node.getDialectOp(opMap);
// Now prepare operands used for building this op:
- // * If the operand is non-variadic, we create a `Value*` local variable.
- // * If the operand is variadic, we create a `SmallVector<Value*>` local
+ // * If the operand is non-variadic, we create a `Value` local variable.
+ // * If the operand is variadic, we create a `SmallVector<Value>` local
// variable.
int valueIndex = 0; // An index for uniquing local variable names.
@@ -851,7 +851,7 @@ void PatternEmitter::createSeparateLocalVarsForOpArgs(
std::string varName;
if (operand->isVariadic()) {
varName = formatv("tblgen_values_{0}", valueIndex++);
- os.indent(6) << formatv("SmallVector<Value *, 4> {0};\n", varName);
+ os.indent(6) << formatv("SmallVector<ValuePtr, 4> {0};\n", varName);
std::string range;
if (node.isNestedDagArg(argIndex)) {
range = childNodeNames[argIndex];
@@ -861,11 +861,11 @@ void PatternEmitter::createSeparateLocalVarsForOpArgs(
// Resolve the symbol for all range use so that we have a uniform way of
// capturing the values.
range = symbolInfoMap.getValueAndRangeUse(range);
- os.indent(6) << formatv("for (auto *v : {0}) {1}.push_back(v);\n", range,
+ os.indent(6) << formatv("for (auto v : {0}) {1}.push_back(v);\n", range,
varName);
} else {
varName = formatv("tblgen_value_{0}", valueIndex++);
- os.indent(6) << formatv("Value *{0} = ", varName);
+ os.indent(6) << formatv("ValuePtr {0} = ", varName);
if (node.isNestedDagArg(argIndex)) {
os << symbolInfoMap.getValueAndRangeUse(childNodeNames[argIndex]);
} else {
@@ -934,7 +934,7 @@ void PatternEmitter::createAggregateLocalVarsForOpArgs(
Operator &resultOp = node.getDialectOp(opMap);
os.indent(6) << formatv(
- "SmallVector<Value *, 4> tblgen_values; (void)tblgen_values;\n");
+ "SmallVector<ValuePtr, 4> tblgen_values; (void)tblgen_values;\n");
os.indent(6) << formatv(
"SmallVector<NamedAttribute, 4> tblgen_attrs; (void)tblgen_attrs;\n");
@@ -975,7 +975,7 @@ void PatternEmitter::createAggregateLocalVarsForOpArgs(
// capturing the values.
range = symbolInfoMap.getValueAndRangeUse(range);
os.indent(6) << formatv(
- "for (auto *v : {0}) tblgen_values.push_back(v);\n", range);
+ "for (auto v : {0}) tblgen_values.push_back(v);\n", range);
} else {
os.indent(6) << formatv("tblgen_values.push_back(", varName);
if (node.isNestedDagArg(argIndex)) {
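A sketch (not part of the patch) of why the loops emitted by RewriterGen drop the `*` from `auto *v`: with a pointer alias both spellings compile, but only `auto v` stays valid once Value itself becomes value-typed. The stand-in declarations and the function name below are assumptions for illustration only.

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"

  class Value;               // stand-in for mlir::Value
  using ValuePtr = Value *;  // assumed transitional alias

  // Mirrors the shape of the generated replacement-value collection loop.
  void collectReplValues(llvm::ArrayRef<ValuePtr> range,
                         llvm::SmallVectorImpl<ValuePtr> &tblgen_repl_values) {
    for (auto v : range)     // `auto *v` would break when ValuePtr == Value
      tblgen_repl_values.push_back(v);
  }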
diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
index f1712efb319..6d5bcc116ad 100644
--- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
+++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
@@ -470,7 +470,7 @@ static void emitDeserializationFunction(const Record *attrClass,
emitResultDeserialization(op, record->getLoc(), " ", words, wordIndex,
resultTypes, valueID, os);
- os << formatv(" SmallVector<Value *, 4> {0};\n", operands);
+ os << formatv(" SmallVector<ValuePtr, 4> {0};\n", operands);
os << formatv(" SmallVector<NamedAttribute, 4> {0};\n", attributes);
// Operand deserialization
emitOperandDeserialization(op, record->getLoc(), " ", words, wordIndex,
diff --git a/mlir/unittests/IR/OperationSupportTest.cpp b/mlir/unittests/IR/OperationSupportTest.cpp
index 80f82ac3e5d..d7dae4648fe 100644
--- a/mlir/unittests/IR/OperationSupportTest.cpp
+++ b/mlir/unittests/IR/OperationSupportTest.cpp
@@ -25,7 +25,7 @@ using namespace mlir::detail;
namespace {
Operation *createOp(MLIRContext *context, bool resizableOperands,
- ArrayRef<Value *> operands = llvm::None,
+ ArrayRef<ValuePtr> operands = llvm::None,
ArrayRef<Type> resultTypes = llvm::None) {
return Operation::create(
UnknownLoc::get(context), OperationName("foo.bar", context), resultTypes,
@@ -39,7 +39,7 @@ TEST(OperandStorageTest, NonResizable) {
Operation *useOp =
createOp(&context, /*resizableOperands=*/false, /*operands=*/llvm::None,
builder.getIntegerType(16));
- Value *operand = useOp->getResult(0);
+ ValuePtr operand = useOp->getResult(0);
// Create a non-resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/false, operand,
@@ -68,7 +68,7 @@ TEST(OperandStorageDeathTest, AddToNonResizable) {
Operation *useOp =
createOp(&context, /*resizableOperands=*/false, /*operands=*/llvm::None,
builder.getIntegerType(16));
- Value *operand = useOp->getResult(0);
+ ValuePtr operand = useOp->getResult(0);
// Create a non-resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/false, operand,
@@ -88,7 +88,7 @@ TEST(OperandStorageTest, Resizable) {
Operation *useOp =
createOp(&context, /*resizableOperands=*/false, /*operands=*/llvm::None,
builder.getIntegerType(16));
- Value *operand = useOp->getResult(0);
+ ValuePtr operand = useOp->getResult(0);
// Create a resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/true, operand,
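Not part of the patch: the mechanical `Value *` → `ValuePtr` rewrite in the unit tests above (and throughout this diff) only type-checks if the project provides a transitional alias, assumed here to be roughly:

  namespace mlir {
  class Value;
  // Assumed transitional alias; later redefinable to `Value` once Value
  // becomes value-typed, without touching the call sites migrated here.
  using ValuePtr = Value *;
  } // namespace mlir

so a declaration such as `ValuePtr operand = useOp->getResult(0);` behaves exactly as the old `Value *operand = ...` did.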