Diffstat (limited to 'mlir/lib')
38 files changed, 142 insertions, 152 deletions
diff --git a/mlir/lib/Analysis/TestParallelismDetection.cpp b/mlir/lib/Analysis/TestParallelismDetection.cpp
index 473d253cfa2..9ae8a311f6d 100644
--- a/mlir/lib/Analysis/TestParallelismDetection.cpp
+++ b/mlir/lib/Analysis/TestParallelismDetection.cpp
@@ -43,7 +43,7 @@ FunctionPassBase *mlir::createParallelismDetectionTestPass() {
 // Walks the function and emits a note for all 'affine.for' ops detected as
 // parallel.
 void TestParallelismDetection::runOnFunction() {
-  Function f = getFunction();
+  FuncOp f = getFunction();
   OpBuilder b(f.getBody());
   f.walk<AffineForOp>([&](AffineForOp forOp) {
     if (isLoopParallel(forOp))
diff --git a/mlir/lib/Conversion/GPUToCUDA/ConvertKernelFuncToCubin.cpp b/mlir/lib/Conversion/GPUToCUDA/ConvertKernelFuncToCubin.cpp
index 1dbedf9fcee..8f381604bb5 100644
--- a/mlir/lib/Conversion/GPUToCUDA/ConvertKernelFuncToCubin.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/ConvertKernelFuncToCubin.cpp
@@ -75,12 +75,12 @@ public:
 
 private:
   static OwnedCubin compilePtxToCubinForTesting(const std::string &ptx,
-                                                Function &function);
+                                                FuncOp &function);
 
   std::string translateModuleToPtx(llvm::Module &module,
                                    llvm::TargetMachine &target_machine);
-  OwnedCubin convertModuleToCubin(llvm::Module &llvmModule, Function &function);
-  LogicalResult translateGpuKernelToCubinAnnotation(Function &function);
+  OwnedCubin convertModuleToCubin(llvm::Module &llvmModule, FuncOp &function);
+  LogicalResult translateGpuKernelToCubinAnnotation(FuncOp &function);
 
   CubinGenerator cubinGenerator;
 };
@@ -104,13 +104,13 @@ std::string GpuKernelToCubinPass::translateModuleToPtx(
 
 OwnedCubin
 GpuKernelToCubinPass::compilePtxToCubinForTesting(const std::string &ptx,
-                                                  Function &function) {
+                                                  FuncOp &function) {
   const char data[] = "CUBIN";
   return llvm::make_unique<std::vector<char>>(data, data + sizeof(data) - 1);
 }
 
 OwnedCubin GpuKernelToCubinPass::convertModuleToCubin(llvm::Module &llvmModule,
-                                                      Function &function) {
+                                                      FuncOp &function) {
   std::unique_ptr<llvm::TargetMachine> targetMachine;
   {
     std::string error;
@@ -136,7 +136,7 @@ OwnedCubin GpuKernelToCubinPass::convertModuleToCubin(llvm::Module &llvmModule,
 }
 
 LogicalResult
-GpuKernelToCubinPass::translateGpuKernelToCubinAnnotation(Function &function) {
+GpuKernelToCubinPass::translateGpuKernelToCubinAnnotation(FuncOp &function) {
   Builder builder(function.getContext());
   OwningModuleRef module = builder.createModule();
 
diff --git a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
index dafc5fa5730..24fc706d82f 100644
--- a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
@@ -88,9 +88,7 @@ private:
 
   LLVM::LLVMType getPointerType() { return llvmPointerType; }
 
-  LLVM::LLVMType getPointerPointerType() {
-    return llvmPointerPointerType;
-  }
+  LLVM::LLVMType getPointerPointerType() { return llvmPointerPointerType; }
 
   LLVM::LLVMType getInt8Type() { return llvmInt8Type; }
 
@@ -118,7 +116,7 @@ private:
 
   void declareCudaFunctions(Location loc);
   Value *setupParamsArray(gpu::LaunchFuncOp launchOp, OpBuilder &builder);
-  Value *generateKernelNameConstant(Function kernelFunction, Location &loc,
+  Value *generateKernelNameConstant(FuncOp kernelFunction, Location &loc,
                                     OpBuilder &builder);
 
   void translateGpuLaunchCalls(mlir::gpu::LaunchFuncOp launchOp);
@@ -156,33 +154,33 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
   Builder builder(module);
   if (!module.getNamedFunction(cuModuleLoadName)) {
     module.push_back(
-        Function::create(loc, cuModuleLoadName,
-                         builder.getFunctionType(
-                             {
-                                 getPointerPointerType(), /* CUmodule *module */
-                                 getPointerType() /* void *cubin */
-                             },
-                             getCUResultType())));
+        FuncOp::create(loc, cuModuleLoadName,
+                       builder.getFunctionType(
+                           {
+                               getPointerPointerType(), /* CUmodule *module */
+                               getPointerType() /* void *cubin */
+                           },
+                           getCUResultType())));
   }
   if (!module.getNamedFunction(cuModuleGetFunctionName)) {
     // The helper uses void* instead of CUDA's opaque CUmodule and
     // CUfunction.
     module.push_back(
-        Function::create(loc, cuModuleGetFunctionName,
-                         builder.getFunctionType(
-                             {
-                                 getPointerPointerType(), /* void **function */
-                                 getPointerType(), /* void *module */
-                                 getPointerType() /* char *name */
-                             },
-                             getCUResultType())));
+        FuncOp::create(loc, cuModuleGetFunctionName,
+                       builder.getFunctionType(
+                           {
+                               getPointerPointerType(), /* void **function */
+                               getPointerType(), /* void *module */
+                               getPointerType() /* char *name */
+                           },
+                           getCUResultType())));
   }
   if (!module.getNamedFunction(cuLaunchKernelName)) {
     // Other than the CUDA api, the wrappers use uintptr_t to match the
     // LLVM type if MLIR's index type, which the GPU dialect uses.
     // Furthermore, they use void* instead of CUDA's opaque CUfunction and
     // CUstream.
-    module.push_back(Function::create(
+    module.push_back(FuncOp::create(
         loc, cuLaunchKernelName,
         builder.getFunctionType(
             {
@@ -203,18 +201,18 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
   if (!module.getNamedFunction(cuGetStreamHelperName)) {
     // Helper function to get the current CUDA stream. Uses void* instead of
     // CUDAs opaque CUstream.
-    module.push_back(Function::create(
+    module.push_back(FuncOp::create(
         loc, cuGetStreamHelperName,
         builder.getFunctionType({}, getPointerType() /* void *stream */)));
   }
   if (!module.getNamedFunction(cuStreamSynchronizeName)) {
     module.push_back(
-        Function::create(loc, cuStreamSynchronizeName,
-                         builder.getFunctionType(
-                             {
-                                 getPointerType() /* CUstream stream */
-                             },
-                             getCUResultType())));
+        FuncOp::create(loc, cuStreamSynchronizeName,
+                       builder.getFunctionType(
+                           {
+                               getPointerType() /* CUstream stream */
+                           },
+                           getCUResultType())));
   }
 }
 
@@ -264,7 +262,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp,
 // %0[n] = constant name[n]
 // %0[n+1] = 0
 Value *GpuLaunchFuncToCudaCallsPass::generateKernelNameConstant(
-    Function kernelFunction, Location &loc, OpBuilder &builder) {
+    FuncOp kernelFunction, Location &loc, OpBuilder &builder) {
   // TODO(herhut): Make this a constant once this is supported.
   auto kernelNameSize = builder.create<LLVM::ConstantOp>(
       loc, getInt32Type(),
@@ -337,7 +335,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
   // Emit the load module call to load the module data. Error checking is done
   // in the called helper function.
   auto cuModule = allocatePointer(builder, loc);
-  Function cuModuleLoad = getModule().getNamedFunction(cuModuleLoadName);
+  FuncOp cuModuleLoad = getModule().getNamedFunction(cuModuleLoadName);
   builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
                                builder.getFunctionAttr(cuModuleLoad),
                                ArrayRef<Value *>{cuModule, data.getResult(0)});
@@ -347,14 +345,14 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
       builder.create<LLVM::LoadOp>(loc, getPointerType(), cuModule);
   auto kernelName = generateKernelNameConstant(kernelFunction, loc, builder);
   auto cuFunction = allocatePointer(builder, loc);
-  Function cuModuleGetFunction =
+  FuncOp cuModuleGetFunction =
       getModule().getNamedFunction(cuModuleGetFunctionName);
   builder.create<LLVM::CallOp>(
       loc, ArrayRef<Type>{getCUResultType()},
       builder.getFunctionAttr(cuModuleGetFunction),
       ArrayRef<Value *>{cuFunction, cuOwningModuleRef, kernelName});
   // Grab the global stream needed for execution.
-  Function cuGetStreamHelper =
+  FuncOp cuGetStreamHelper =
       getModule().getNamedFunction(cuGetStreamHelperName);
   auto cuStream = builder.create<LLVM::CallOp>(
       loc, ArrayRef<Type>{getPointerType()},
diff --git a/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp b/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp
index 6306c567907..e19e2de99fb 100644
--- a/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp
@@ -59,10 +59,10 @@ private:
     return LLVM::LLVMType::getIntNTy(llvmDialect, bits);
   }
 
-  Function getMallocHelper(Location loc, Builder &builder) {
-    Function result = getModule().getNamedFunction(kMallocHelperName);
+  FuncOp getMallocHelper(Location loc, Builder &builder) {
+    FuncOp result = getModule().getNamedFunction(kMallocHelperName);
     if (!result) {
-      result = Function::create(
+      result = FuncOp::create(
           loc, kMallocHelperName,
           builder.getFunctionType(ArrayRef<Type>{getIndexType()},
                                   LLVM::LLVMType::getInt8PtrTy(llvmDialect)));
@@ -75,13 +75,13 @@ private:
   // data from blob. As there are currently no global constants, this uses a
   // sequence of store operations.
   // TODO(herhut): Use global constants instead.
-  Function generateCubinAccessor(Builder &builder, Function &orig,
-                                 StringAttr blob) {
+  FuncOp generateCubinAccessor(Builder &builder, FuncOp &orig,
+                               StringAttr blob) {
     Location loc = orig.getLoc();
     SmallString<128> nameBuffer(orig.getName());
     nameBuffer.append(kCubinGetterSuffix);
     // Generate a function that returns void*.
-    Function result = Function::create(
+    FuncOp result = FuncOp::create(
         loc, mlir::Identifier::get(nameBuffer, &getContext()),
         builder.getFunctionType(ArrayRef<Type>{},
                                 LLVM::LLVMType::getInt8PtrTy(llvmDialect)));
@@ -127,7 +127,7 @@ public:
     for (auto it = functions.begin(); it != functions.end();) {
       // Move iterator to after the current function so that potential insertion
       // of the accessor is after the kernel with cubin iself.
-      Function orig = *it++;
+      FuncOp orig = *it++;
       StringAttr cubinBlob = orig.getAttrOfType<StringAttr>(kCubinAnnotation);
       if (!cubinBlob)
         continue;
diff --git a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
index 01d473e7f59..dc783f94865 100644
--- a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
@@ -441,13 +441,13 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
          createIndexConstant(rewriter, op->getLoc(), elementSize)});
 
     // Insert the `malloc` declaration if it is not already present.
-    Function mallocFunc =
+    FuncOp mallocFunc =
         op->getParentOfType<FuncOp>().getModule().getNamedFunction("malloc");
     if (!mallocFunc) {
       auto mallocType =
          rewriter.getFunctionType(getIndexType(), getVoidPtrType());
       mallocFunc =
-          Function::create(rewriter.getUnknownLoc(), "malloc", mallocType);
+          FuncOp::create(rewriter.getUnknownLoc(), "malloc", mallocType);
       op->getParentOfType<FuncOp>().getModule().push_back(mallocFunc);
     }
 
@@ -503,11 +503,11 @@ struct DeallocOpLowering : public LLVMLegalizationPattern<DeallocOp> {
     OperandAdaptor<DeallocOp> transformed(operands);
 
     // Insert the `free` declaration if it is not already present.
-    Function freeFunc =
+    FuncOp freeFunc =
         op->getParentOfType<FuncOp>().getModule().getNamedFunction("free");
     if (!freeFunc) {
       auto freeType = rewriter.getFunctionType(getVoidPtrType(), {});
-      freeFunc = Function::create(rewriter.getUnknownLoc(), "free", freeType);
+      freeFunc = FuncOp::create(rewriter.getUnknownLoc(), "free", freeType);
       op->getParentOfType<FuncOp>().getModule().push_back(freeFunc);
     }
 
diff --git a/mlir/lib/EDSC/CoreAPIs.cpp b/mlir/lib/EDSC/CoreAPIs.cpp
index 578b8673658..8b1831342b8 100644
--- a/mlir/lib/EDSC/CoreAPIs.cpp
+++ b/mlir/lib/EDSC/CoreAPIs.cpp
@@ -98,6 +98,6 @@ mlir_attr_t makeBoolAttr(mlir_context_t context, bool value) {
 }
 
 unsigned getFunctionArity(mlir_func_t function) {
-  auto f = mlir::Function::getFromOpaquePointer(function);
+  auto f = mlir::FuncOp::getFromOpaquePointer(function);
   return f.getNumArguments();
 }
diff --git a/mlir/lib/ExecutionEngine/MemRefUtils.cpp b/mlir/lib/ExecutionEngine/MemRefUtils.cpp
index f13b743de0c..e34bf4455ab 100644
--- a/mlir/lib/ExecutionEngine/MemRefUtils.cpp
+++ b/mlir/lib/ExecutionEngine/MemRefUtils.cpp
@@ -67,7 +67,7 @@ allocMemRefDescriptor(Type type, bool allocateData = true,
 }
 
 llvm::Expected<SmallVector<void *, 8>>
-mlir::allocateMemRefArguments(Function func, float initialValue) {
+mlir::allocateMemRefArguments(FuncOp func, float initialValue) {
   SmallVector<void *, 8> args;
   args.reserve(func.getNumArguments());
   for (const auto &arg : func.getArguments()) {
diff --git a/mlir/lib/GPU/IR/GPUDialect.cpp b/mlir/lib/GPU/IR/GPUDialect.cpp
index 92034c5d288..22f87a9911f 100644
--- a/mlir/lib/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/GPU/IR/GPUDialect.cpp
@@ -32,7 +32,7 @@ using namespace mlir::gpu;
 
 StringRef GPUDialect::getDialectName() { return "gpu"; }
 
-bool GPUDialect::isKernel(Function function) {
+bool GPUDialect::isKernel(FuncOp function) {
   UnitAttr isKernelAttr =
       function.getAttrOfType<UnitAttr>(getKernelFuncAttrName());
   return static_cast<bool>(isKernelAttr);
@@ -84,25 +84,25 @@ void LaunchOp::build(Builder *builder, OperationState *result, Value *gridSizeX,
 Region &LaunchOp::getBody() { return getOperation()->getRegion(0); }
 
 KernelDim3 LaunchOp::getBlockIds() {
-  assert(!getBody().getBlocks().empty() && "Function body must not be empty.");
+  assert(!getBody().getBlocks().empty() && "FuncOp body must not be empty.");
   auto args = getBody().getBlocks().front().getArguments();
   return KernelDim3{args[0], args[1], args[2]};
 }
 
 KernelDim3 LaunchOp::getThreadIds() {
-  assert(!getBody().getBlocks().empty() && "Function body must not be empty.");
+  assert(!getBody().getBlocks().empty() && "FuncOp body must not be empty.");
   auto args = getBody().getBlocks().front().getArguments();
   return KernelDim3{args[3], args[4], args[5]};
 }
 
 KernelDim3 LaunchOp::getGridSize() {
-  assert(!getBody().getBlocks().empty() && "Function body must not be empty.");
+  assert(!getBody().getBlocks().empty() && "FuncOp body must not be empty.");
   auto args = getBody().getBlocks().front().getArguments();
   return KernelDim3{args[6], args[7], args[8]};
 }
 
 KernelDim3 LaunchOp::getBlockSize() {
-  assert(!getBody().getBlocks().empty() && "Function body must not be empty.");
+  assert(!getBody().getBlocks().empty() && "FuncOp body must not be empty.");
   auto args = getBody().getBlocks().front().getArguments();
   return KernelDim3{args[9], args[10], args[11]};
 }
@@ -378,10 +378,9 @@ void LaunchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
 //===----------------------------------------------------------------------===//
 
 void LaunchFuncOp::build(Builder *builder, OperationState *result,
-                         Function kernelFunc, Value *gridSizeX,
-                         Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX,
-                         Value *blockSizeY, Value *blockSizeZ,
-                         ArrayRef<Value *> kernelOperands) {
+                         FuncOp kernelFunc, Value *gridSizeX, Value *gridSizeY,
+                         Value *gridSizeZ, Value *blockSizeX, Value *blockSizeY,
+                         Value *blockSizeZ, ArrayRef<Value *> kernelOperands) {
   // Add grid and block sizes as op operands, followed by the data operands.
   result->addOperands(
       {gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
@@ -391,7 +390,7 @@ void LaunchFuncOp::build(Builder *builder, OperationState *result,
 }
 
 void LaunchFuncOp::build(Builder *builder, OperationState *result,
-                         Function kernelFunc, KernelDim3 gridSize,
+                         FuncOp kernelFunc, KernelDim3 gridSize,
                          KernelDim3 blockSize,
                          ArrayRef<Value *> kernelOperands) {
   build(builder, result, kernelFunc, gridSize.x, gridSize.y, gridSize.z,
@@ -427,7 +426,7 @@ LogicalResult LaunchFuncOp::verify() {
   }
 
   auto module = getParentOfType<ModuleOp>();
-  Function kernelFunc = module.getNamedFunction(kernel());
+  FuncOp kernelFunc = module.getNamedFunction(kernel());
   if (!kernelFunc)
     return emitError() << "kernel function '" << kernelAttr
                        << "' is undefined";
diff --git a/mlir/lib/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/GPU/Transforms/KernelOutlining.cpp
index 0bc7041bd6e..75b98fdb9c5 100644
--- a/mlir/lib/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/GPU/Transforms/KernelOutlining.cpp
@@ -40,7 +40,7 @@ static void createForAllDimensions(OpBuilder &builder, Location loc,
 
 // Add operations generating block/thread ids and gird/block dimensions at the
 // beginning of `kernelFunc` and replace uses of the respective function args.
-static void injectGpuIndexOperations(Location loc, Function kernelFunc) {
+static void injectGpuIndexOperations(Location loc, FuncOp kernelFunc) {
   OpBuilder OpBuilder(kernelFunc.getBody());
   SmallVector<Value *, 12> indexOps;
   createForAllDimensions<gpu::BlockId>(OpBuilder, loc, indexOps);
@@ -58,14 +58,14 @@ static void injectGpuIndexOperations(Location loc, Function kernelFunc) {
 
 // Outline the `gpu.launch` operation body into a kernel function. Replace
 // `gpu.return` operations by `std.return` in the generated functions.
-static Function outlineKernelFunc(gpu::LaunchOp launchOp) {
+static FuncOp outlineKernelFunc(gpu::LaunchOp launchOp) {
   Location loc = launchOp.getLoc();
   SmallVector<Type, 4> kernelOperandTypes(launchOp.getKernelOperandTypes());
   FunctionType type =
       FunctionType::get(kernelOperandTypes, {}, launchOp.getContext());
   std::string kernelFuncName =
       Twine(launchOp.getParentOfType<FuncOp>().getName(), "_kernel").str();
-  Function outlinedFunc = Function::create(loc, kernelFuncName, type);
+  FuncOp outlinedFunc = FuncOp::create(loc, kernelFuncName, type);
   outlinedFunc.getBody().takeBody(launchOp.getBody());
   Builder builder(launchOp.getContext());
   outlinedFunc.setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
@@ -81,8 +81,7 @@ static Function outlineKernelFunc(gpu::LaunchOp launchOp) {
 
 // Replace `gpu.launch` operations with an `gpu.launch_func` operation launching
 // `kernelFunc`.
-static void convertToLaunchFuncOp(gpu::LaunchOp &launchOp,
-                                  Function kernelFunc) {
+static void convertToLaunchFuncOp(gpu::LaunchOp &launchOp, FuncOp kernelFunc) {
   OpBuilder builder(launchOp);
   SmallVector<Value *, 4> kernelOperandValues(
       launchOp.getKernelOperandValues());
@@ -100,7 +99,7 @@ public:
     ModuleManager moduleManager(getModule());
     for (auto func : getModule().getOps<FuncOp>()) {
       func.walk<mlir::gpu::LaunchOp>([&](mlir::gpu::LaunchOp op) {
-        Function outlinedFunc = outlineKernelFunc(op);
+        FuncOp outlinedFunc = outlineKernelFunc(op);
         moduleManager.insert(outlinedFunc);
         convertToLaunchFuncOp(op, outlinedFunc);
       });
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
index 6d0df6ded8e..ddead2e01a7 100644
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -177,7 +177,7 @@ IntegerSetAttr Builder::getIntegerSetAttr(IntegerSet set) {
 
 TypeAttr Builder::getTypeAttr(Type type) { return TypeAttr::get(type); }
 
-FunctionAttr Builder::getFunctionAttr(Function value) {
+FunctionAttr Builder::getFunctionAttr(FuncOp value) {
   return getFunctionAttr(value.getName());
 }
 FunctionAttr Builder::getFunctionAttr(StringRef value) {
@@ -337,14 +337,14 @@ OpBuilder::~OpBuilder() {}
 /// Add new block and set the insertion point to the end of it. If an
 /// 'insertBefore' block is passed, the block will be placed before the
 /// specified block. If not, the block will be appended to the end of the
-/// current function.
+/// current region.
 Block *OpBuilder::createBlock(Block *insertBefore) {
   Block *b = new Block();
 
   // If we are supposed to insert before a specific block, do so, otherwise add
-  // the block to the end of the function.
+  // the block to the end of the region.
   if (insertBefore)
-    region->getBlocks().insert(Function::iterator(insertBefore), b);
+    region->getBlocks().insert(Region::iterator(insertBefore), b);
   else
     region->push_back(b);
diff --git a/mlir/lib/IR/Dialect.cpp b/mlir/lib/IR/Dialect.cpp
index e38b95ff0f7..1e042cbf893 100644
--- a/mlir/lib/IR/Dialect.cpp
+++ b/mlir/lib/IR/Dialect.cpp
@@ -71,14 +71,14 @@ Dialect::~Dialect() {}
 
 /// Verify an attribute from this dialect on the given function. Returns
 /// failure if the verification failed, success otherwise.
-LogicalResult Dialect::verifyFunctionAttribute(Function, NamedAttribute) {
+LogicalResult Dialect::verifyFunctionAttribute(FuncOp, NamedAttribute) {
   return success();
 }
 
 /// Verify an attribute from this dialect on the argument at 'argIndex' for
 /// the given function. Returns failure if the verification failed, success
 /// otherwise.
-LogicalResult Dialect::verifyFunctionArgAttribute(Function, unsigned argIndex,
+LogicalResult Dialect::verifyFunctionArgAttribute(FuncOp, unsigned argIndex,
                                                   NamedAttribute) {
   return success();
 }
diff --git a/mlir/lib/IR/Function.cpp b/mlir/lib/IR/Function.cpp
index 973a0910f42..b471010d0af 100644
--- a/mlir/lib/IR/Function.cpp
+++ b/mlir/lib/IR/Function.cpp
@@ -33,22 +33,22 @@ using namespace mlir;
 // Function Operation.
 //===----------------------------------------------------------------------===//
 
-Function FuncOp::create(Location location, StringRef name, FunctionType type,
-                        ArrayRef<NamedAttribute> attrs) {
+FuncOp FuncOp::create(Location location, StringRef name, FunctionType type,
+                      ArrayRef<NamedAttribute> attrs) {
   OperationState state(location, "func");
   Builder builder(location->getContext());
-  Function::build(&builder, &state, name, type, attrs);
+  FuncOp::build(&builder, &state, name, type, attrs);
   return llvm::cast<FuncOp>(Operation::create(state));
 }
-Function FuncOp::create(Location location, StringRef name, FunctionType type,
-                        llvm::iterator_range<dialect_attr_iterator> attrs) {
+FuncOp FuncOp::create(Location location, StringRef name, FunctionType type,
+                      llvm::iterator_range<dialect_attr_iterator> attrs) {
   SmallVector<NamedAttribute, 8> attrRef(attrs);
   return create(location, name, type, llvm::makeArrayRef(attrRef));
 }
-Function FuncOp::create(Location location, StringRef name, FunctionType type,
-                        ArrayRef<NamedAttribute> attrs,
-                        ArrayRef<NamedAttributeList> argAttrs) {
-  Function func = create(location, name, type, attrs);
+FuncOp FuncOp::create(Location location, StringRef name, FunctionType type,
+                      ArrayRef<NamedAttribute> attrs,
+                      ArrayRef<NamedAttributeList> argAttrs) {
+  FuncOp func = create(location, name, type, attrs);
   func.setAllArgAttrs(argAttrs);
   return func;
 }
@@ -74,7 +74,7 @@ void FuncOp::build(Builder *builder, OperationState *result, StringRef name,
 }
 
 /// Get the parent module.
-ModuleOp Function::getModule() {
+ModuleOp FuncOp::getModule() {
   auto *parent = getOperation()->getContainingRegion();
   return parent ? parent->getParentOfType<ModuleOp>() : nullptr;
 }
diff --git a/mlir/lib/IR/Region.cpp b/mlir/lib/IR/Region.cpp
index 2818b1ce207..d6ed2102fb3 100644
--- a/mlir/lib/IR/Region.cpp
+++ b/mlir/lib/IR/Region.cpp
@@ -17,7 +17,6 @@
 
 #include "mlir/IR/Region.h"
 #include "mlir/IR/BlockAndValueMapping.h"
-#include "mlir/IR/Function.h"
 #include "mlir/IR/Operation.h"
 
 using namespace mlir;
diff --git a/mlir/lib/IR/Value.cpp b/mlir/lib/IR/Value.cpp
index 669f641b734..4fa49213a3f 100644
--- a/mlir/lib/IR/Value.cpp
+++ b/mlir/lib/IR/Value.cpp
@@ -17,7 +17,6 @@
 
 #include "mlir/IR/Value.h"
 #include "mlir/IR/Block.h"
-#include "mlir/IR/Function.h"
 #include "mlir/IR/Operation.h"
 
 using namespace mlir;
diff --git a/mlir/lib/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/LLVMIR/IR/LLVMDialect.cpp
index 0dbf63a3ce7..7a73d89f77e 100644
--- a/mlir/lib/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/LLVMIR/IR/LLVMDialect.cpp
@@ -816,7 +816,7 @@ void LLVMDialect::printType(Type type, raw_ostream &os) const {
 }
 
 /// Verify LLVMIR function argument attributes.
-LogicalResult LLVMDialect::verifyFunctionArgAttribute(Function func,
+LogicalResult LLVMDialect::verifyFunctionArgAttribute(FuncOp func,
                                                       unsigned argIdx,
                                                       NamedAttribute argAttr) {
   // Check that llvm.noalias is a boolean attribute.
diff --git a/mlir/lib/Linalg/Transforms/Fusion.cpp b/mlir/lib/Linalg/Transforms/Fusion.cpp
index 5761cc637b7..cb9509ed54e 100644
--- a/mlir/lib/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Linalg/Transforms/Fusion.cpp
@@ -209,7 +209,7 @@ static bool isStructurallyFusableProducer(LinalgOp producer, Value *readView,
   return true;
 }
 
-static void fuseLinalgOps(Function f, ArrayRef<int64_t> tileSizes) {
+static void fuseLinalgOps(FuncOp f, ArrayRef<int64_t> tileSizes) {
   OperationFolder state;
   DenseSet<Operation *> eraseSet;
 
diff --git a/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp b/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp
index 2b9c893276a..0cda24722e2 100644
--- a/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp
+++ b/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp
@@ -171,11 +171,11 @@ public:
     auto int64Ty = lowering.convertType(rewriter.getIntegerType(64));
     // Insert the `malloc` declaration if it is not already present.
    auto module = op->getParentOfType<ModuleOp>();
-    Function mallocFunc = module.getNamedFunction("malloc");
+    FuncOp mallocFunc = module.getNamedFunction("malloc");
    if (!mallocFunc) {
      auto mallocType = rewriter.getFunctionType(int64Ty, voidPtrTy);
      mallocFunc =
-          Function::create(rewriter.getUnknownLoc(), "malloc", mallocType);
+          FuncOp::create(rewriter.getUnknownLoc(), "malloc", mallocType);
      module.push_back(mallocFunc);
    }
 
@@ -232,10 +232,10 @@ public:
        LLVM::LLVMType::getInt8Ty(lowering.getDialect()).getPointerTo();
    // Insert the `free` declaration if it is not already present.
    auto module = op->getParentOfType<ModuleOp>();
-    Function freeFunc = module.getNamedFunction("free");
+    FuncOp freeFunc = module.getNamedFunction("free");
    if (!freeFunc) {
      auto freeType = rewriter.getFunctionType(voidPtrTy, {});
-      freeFunc = Function::create(rewriter.getUnknownLoc(), "free", freeType);
+      freeFunc = FuncOp::create(rewriter.getUnknownLoc(), "free", freeType);
      module.push_back(freeFunc);
    }
 
@@ -573,7 +573,7 @@ public:
 
 // Create a function definition which takes as argument pointers to the input
 // types and returns pointers to the output types.
-static Function getLLVMLibraryCallImplDefinition(Function libFn) {
+static FuncOp getLLVMLibraryCallImplDefinition(FuncOp libFn) {
   auto implFnName = (libFn.getName().str() + "_impl");
   auto module = libFn.getModule();
   if (auto f = module.getNamedFunction(implFnName)) {
@@ -589,7 +589,7 @@ static Function getLLVMLibraryCallImplDefinition(Function libFn) {
   auto implFnType = FunctionType::get(fnArgTypes, {}, libFn.getContext());
 
   // Insert the implementation function definition.
-  auto implFnDefn = Function::create(libFn.getLoc(), implFnName, implFnType);
+  auto implFnDefn = FuncOp::create(libFn.getLoc(), implFnName, implFnType);
   module.push_back(implFnDefn);
   return implFnDefn;
 }
@@ -597,9 +597,9 @@ static Function getLLVMLibraryCallImplDefinition(Function libFn) {
 // Get function definition for the LinalgOp. If it doesn't exist, insert a
 // definition.
 template <typename LinalgOp>
-static Function getLLVMLibraryCallDeclaration(Operation *op,
-                                              LLVMTypeConverter &lowering,
-                                              PatternRewriter &rewriter) {
+static FuncOp getLLVMLibraryCallDeclaration(Operation *op,
+                                            LLVMTypeConverter &lowering,
+                                            PatternRewriter &rewriter) {
   assert(isa<LinalgOp>(op));
   auto fnName = LinalgOp::getLibraryCallName();
   auto module = op->getParentOfType<ModuleOp>();
@@ -619,14 +619,14 @@ static Function getLLVMLibraryCallDeclaration(Operation *op,
          "Library call for linalg operation can be generated only for ops that "
          "have void return types");
   auto libFnType = FunctionType::get(inputTypes, {}, op->getContext());
-  auto libFn = Function::create(op->getLoc(), fnName, libFnType);
+  auto libFn = FuncOp::create(op->getLoc(), fnName, libFnType);
   module.push_back(libFn);
   // Return after creating the function definition. The body will be created
   // later.
   return libFn;
 }
 
-static void getLLVMLibraryCallDefinition(Function fn,
+static void getLLVMLibraryCallDefinition(FuncOp fn,
                                          LLVMTypeConverter &lowering) {
   // Generate the implementation function definition.
   auto implFn = getLLVMLibraryCallImplDefinition(fn);
@@ -666,17 +666,15 @@ public:
     return convertLinalgType(t, *this);
   }
 
-  void addLibraryFnDeclaration(Function fn) {
+  void addLibraryFnDeclaration(FuncOp fn) {
     libraryFnDeclarations.push_back(fn);
   }
 
-  ArrayRef<Function> getLibraryFnDeclarations() {
-    return libraryFnDeclarations;
-  }
+  ArrayRef<FuncOp> getLibraryFnDeclarations() { return libraryFnDeclarations; }
 
 private:
   /// List of library functions declarations needed during dialect conversion
-  SmallVector<Function, 2> libraryFnDeclarations;
+  SmallVector<FuncOp, 2> libraryFnDeclarations;
 };
 } // end anonymous namespace
 
@@ -727,7 +725,7 @@ struct LowerLinalgToLLVMPass : public ModulePass<LowerLinalgToLLVMPass> {
 // This is currently written as a standalone function because the lowering to
 // affine will look different than lowering to LLVM and it is still unclear how
 // everything will be eventually structured.
-static void lowerLinalgSubViewOps(Function &f) {
+static void lowerLinalgSubViewOps(FuncOp &f) {
   f.walk<SubViewOp>([&](SubViewOp op) {
     OpBuilder b(op);
     ScopedContext scope(b, op.getLoc());
@@ -750,7 +748,7 @@ static void lowerLinalgSubViewOps(Function &f) {
 
 // Converts a `linalg.for` op to CFG form before actual conversion to the LLVM
 // dialect starts.
-static void lowerLinalgForToCFG(Function &f) {
+static void lowerLinalgForToCFG(FuncOp &f) {
   // Collect all the For operations. We do this as a prepass to avoid
   // invalidating the walker with our rewrite.
   SmallVector<linalg::ForOp, 8> instsToRewrite;
diff --git a/mlir/lib/Linalg/Transforms/Tiling.cpp b/mlir/lib/Linalg/Transforms/Tiling.cpp
index 4955a80ef5f..e6bb6c302f7 100644
--- a/mlir/lib/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Linalg/Transforms/Tiling.cpp
@@ -482,7 +482,7 @@ mlir::linalg::tileLinalgOp(LinalgOp op, ArrayRef<int64_t> tileSizes,
   return tileLinalgOp(op, tileSizeValues, folder, viewsToPromote);
 }
 
-static void tileLinalgOps(Function f, ArrayRef<int64_t> tileSizes,
+static void tileLinalgOps(FuncOp f, ArrayRef<int64_t> tileSizes,
                           bool promoteViews) {
   OperationFolder folder;
   f.walk<LinalgOp>([promoteViews, tileSizes, &folder](LinalgOp op) {
diff --git a/mlir/lib/Pass/IRPrinting.cpp b/mlir/lib/Pass/IRPrinting.cpp
index aef16ff231a..232068fad08 100644
--- a/mlir/lib/Pass/IRPrinting.cpp
+++ b/mlir/lib/Pass/IRPrinting.cpp
@@ -61,8 +61,8 @@ private:
 static void printIR(const llvm::Any &ir, bool printModuleScope,
                     raw_ostream &out) {
   // Check for printing at module scope.
-  if (printModuleScope && llvm::any_isa<Function>(ir)) {
-    Function function = llvm::any_cast<Function>(ir);
+  if (printModuleScope && llvm::any_isa<FuncOp>(ir)) {
+    FuncOp function = llvm::any_cast<FuncOp>(ir);
 
     // Print the function name and a newline before the Module.
     out << " (function: " << function.getName() << ")\n";
@@ -74,8 +74,8 @@ static void printIR(const llvm::Any &ir, bool printModuleScope,
   out << "\n";
 
   // Print the given function.
-  if (llvm::any_isa<Function>(ir)) {
-    llvm::any_cast<Function>(ir).print(out);
+  if (llvm::any_isa<FuncOp>(ir)) {
+    llvm::any_cast<FuncOp>(ir).print(out);
     return;
   }
 
diff --git a/mlir/lib/Pass/Pass.cpp b/mlir/lib/Pass/Pass.cpp
index a35efbdcb7a..0357fb3a8d4 100644
--- a/mlir/lib/Pass/Pass.cpp
+++ b/mlir/lib/Pass/Pass.cpp
@@ -42,7 +42,7 @@ using namespace mlir::detail;
 void Pass::anchor() {}
 
 /// Forwarding function to execute this pass.
-LogicalResult FunctionPassBase::run(Function fn, FunctionAnalysisManager &fam) {
+LogicalResult FunctionPassBase::run(FuncOp fn, FunctionAnalysisManager &fam) {
   // Initialize the pass state.
   passState.emplace(fn, fam);
 
@@ -110,7 +110,7 @@ FunctionPassExecutor::FunctionPassExecutor(const FunctionPassExecutor &rhs)
 }
 
 /// Run all of the passes in this manager over the current function.
-LogicalResult detail::FunctionPassExecutor::run(Function function,
+LogicalResult detail::FunctionPassExecutor::run(FuncOp function,
                                                 FunctionAnalysisManager &fam) {
   // Run each of the held passes.
   for (auto &pass : passes)
@@ -135,8 +135,7 @@ LogicalResult detail::ModulePassExecutor::run(Module module,
 
 /// Utility to run the given function and analysis manager on a provided
 /// function pass executor.
-static LogicalResult runFunctionPipeline(FunctionPassExecutor &fpe,
-                                         Function func,
+static LogicalResult runFunctionPipeline(FunctionPassExecutor &fpe, FuncOp func,
                                          FunctionAnalysisManager &fam) {
   // Run the function pipeline over the provided function.
   auto result = fpe.run(func, fam);
@@ -184,7 +183,7 @@ void ModuleToFunctionPassAdaptorParallel::runOnModule() {
   // Run a prepass over the module to collect the functions to execute a over.
   // This ensures that an analysis manager exists for each function, as well as
   // providing a queue of functions to execute over.
-  std::vector<std::pair<Function, FunctionAnalysisManager>> funcAMPairs;
+  std::vector<std::pair<FuncOp, FunctionAnalysisManager>> funcAMPairs;
   for (auto func : getModule().getOps<FuncOp>())
     if (!func.isExternal())
      funcAMPairs.emplace_back(func, mam.slice(func));
@@ -340,13 +339,13 @@ PassInstrumentor *FunctionAnalysisManager::getPassInstrumentor() const {
 }
 
 /// Create an analysis slice for the given child function.
-FunctionAnalysisManager ModuleAnalysisManager::slice(Function func) {
+FunctionAnalysisManager ModuleAnalysisManager::slice(FuncOp func) {
   assert(func.getModule() == moduleAnalyses.getIRUnit() &&
         "function has a different parent module");
   auto it = functionAnalyses.find(func);
   if (it == functionAnalyses.end()) {
-    it = functionAnalyses.try_emplace(func, new AnalysisMap<Function>(func))
-             .first;
+    it =
+        functionAnalyses.try_emplace(func, new AnalysisMap<FuncOp>(func)).first;
   }
   return {this, it->second.get()};
 }
diff --git a/mlir/lib/Pass/PassDetail.h b/mlir/lib/Pass/PassDetail.h
index b0cd22820a3..46674eabb5d 100644
--- a/mlir/lib/Pass/PassDetail.h
+++ b/mlir/lib/Pass/PassDetail.h
@@ -48,7 +48,7 @@ public:
   FunctionPassExecutor(const FunctionPassExecutor &rhs);
 
   /// Run the executor on the given function.
-  LogicalResult run(Function function, FunctionAnalysisManager &fam);
+  LogicalResult run(FuncOp function, FunctionAnalysisManager &fam);
 
   /// Add a pass to the current executor. This takes ownership over the provided
   /// pass pointer.
diff --git a/mlir/lib/SPIRV/Serialization/ConvertFromBinary.cpp b/mlir/lib/SPIRV/Serialization/ConvertFromBinary.cpp
index d3eec3e9c13..d1efd56b97f 100644
--- a/mlir/lib/SPIRV/Serialization/ConvertFromBinary.cpp
+++ b/mlir/lib/SPIRV/Serialization/ConvertFromBinary.cpp
@@ -36,7 +36,7 @@ using namespace mlir;
 // block. The created block will be terminated by `std.return`.
 Block *createOneBlockFunction(Builder builder, Module module) {
   auto fnType = builder.getFunctionType(/*inputs=*/{}, /*results=*/{});
-  auto fn = Function::create(builder.getUnknownLoc(), "spirv_module", fnType);
+  auto fn = FuncOp::create(builder.getUnknownLoc(), "spirv_module", fnType);
   module.push_back(fn);
 
   fn.addEntryBlock();
diff --git a/mlir/lib/Support/MlirOptMain.cpp b/mlir/lib/Support/MlirOptMain.cpp
index ab7b021f920..80cba5ad73f 100644
--- a/mlir/lib/Support/MlirOptMain.cpp
+++ b/mlir/lib/Support/MlirOptMain.cpp
@@ -24,7 +24,6 @@
 #include "mlir/Analysis/Passes.h"
 #include "mlir/IR/Attributes.h"
 #include "mlir/IR/Diagnostics.h"
-#include "mlir/IR/Function.h"
 #include "mlir/IR/Location.h"
 #include "mlir/IR/MLIRContext.h"
 #include "mlir/IR/Module.h"
diff --git a/mlir/lib/Target/LLVMIR/ConvertToNVVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertToNVVMIR.cpp
index 0fef7606812..00502306362 100644
--- a/mlir/lib/Target/LLVMIR/ConvertToNVVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertToNVVMIR.cpp
@@ -68,7 +68,7 @@ std::unique_ptr<llvm::Module> mlir::translateModuleToNVVMIR(Module m) {
 
   // Insert the nvvm.annotations kernel so that the NVVM backend recognizes the
   // function as a kernel.
-  for (Function func : m.getOps<FuncOp>()) {
+  for (FuncOp func : m.getOps<FuncOp>()) {
    if (!func.getAttrOfType<UnitAttr>(
            gpu::GPUDialect::getKernelFuncAttrName()))
      continue;
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index a358e8363f4..9388f2318d2 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -275,7 +275,7 @@ static Value *getPHISourceValue(Block *current, Block *pred,
              : terminator.getSuccessorOperand(1, index);
 }
 
-void ModuleTranslation::connectPHINodes(Function func) {
+void ModuleTranslation::connectPHINodes(FuncOp func) {
   // Skip the first block, it cannot be branched to and its arguments correspond
   // to the arguments of the LLVM function.
   for (auto it = std::next(func.begin()), eit = func.end(); it != eit; ++it) {
@@ -306,7 +306,7 @@ static void topologicalSortImpl(llvm::SetVector<Block *> &blocks, Block *b) {
 }
 
 // Sort function blocks topologically.
-static llvm::SetVector<Block *> topologicalSort(Function f) {
+static llvm::SetVector<Block *> topologicalSort(FuncOp f) {
   // For each blocks that has not been visited yet (i.e. that has no
   // predecessors), add it to the list and traverse its successors in DFS
   // preorder.
@@ -320,7 +320,7 @@ static llvm::SetVector<Block *> topologicalSort(Function f) {
   return blocks;
 }
 
-bool ModuleTranslation::convertOneFunction(Function func) {
+bool ModuleTranslation::convertOneFunction(FuncOp func) {
   // Clear the block and value mappings, they are only relevant within one
   // function.
   blockMapping.clear();
@@ -375,7 +375,7 @@ bool ModuleTranslation::convertOneFunction(Function func) {
 bool ModuleTranslation::convertFunctions() {
   // Declare all functions first because there may be function calls that form a
   // call graph with cycles.
-  for (Function function : mlirModule.getOps<FuncOp>()) {
+  for (FuncOp function : mlirModule.getOps<FuncOp>()) {
    mlir::BoolAttr isVarArgsAttr =
        function.getAttrOfType<BoolAttr>("std.varargs");
    bool isVarArgs = isVarArgsAttr && isVarArgsAttr.getValue();
@@ -392,7 +392,7 @@ bool ModuleTranslation::convertFunctions() {
   }
 
   // Convert functions.
-  for (Function function : mlirModule.getOps<FuncOp>()) {
+  for (FuncOp function : mlirModule.getOps<FuncOp>()) {
    // Ignore external functions.
    if (function.isExternal())
      continue;
diff --git a/mlir/lib/Transforms/DialectConversion.cpp b/mlir/lib/Transforms/DialectConversion.cpp
index 42683afc468..23c12a0e7c4 100644
--- a/mlir/lib/Transforms/DialectConversion.cpp
+++ b/mlir/lib/Transforms/DialectConversion.cpp
@@ -849,7 +849,7 @@ struct FunctionConverter {
   /// error, success otherwise. If 'signatureConversion' is provided, the
   /// arguments of the entry block are updated accordingly.
   LogicalResult
-  convertFunction(Function f,
+  convertFunction(FuncOp f,
                   TypeConverter::SignatureConversion *signatureConversion);
 
   /// Converts the given region starting from the entry block and following the
@@ -957,7 +957,7 @@ FunctionConverter::convertRegion(DialectConversionRewriter &rewriter,
 }
 
 LogicalResult FunctionConverter::convertFunction(
-    Function f, TypeConverter::SignatureConversion *signatureConversion) {
+    FuncOp f, TypeConverter::SignatureConversion *signatureConversion) {
   // If this is an external function, there is nothing else to do.
   if (f.isExternal())
     return success();
@@ -1131,14 +1131,14 @@ LogicalResult mlir::applyConversionPatterns(Module module,
                                             ConversionTarget &target,
                                             TypeConverter &converter,
                                             OwningRewritePatternList &&patterns) {
-  SmallVector<Function, 32> allFunctions(module.getOps<FuncOp>());
+  SmallVector<FuncOp, 32> allFunctions(module.getOps<FuncOp>());
   return applyConversionPatterns(allFunctions, target, converter,
                                  std::move(patterns));
 }
 
 /// Convert the given functions with the provided conversion patterns.
 LogicalResult mlir::applyConversionPatterns(
-    MutableArrayRef<Function> fns, ConversionTarget &target,
+    MutableArrayRef<FuncOp> fns, ConversionTarget &target,
     TypeConverter &converter, OwningRewritePatternList &&patterns) {
   if (fns.empty())
     return success();
@@ -1174,7 +1174,7 @@ LogicalResult mlir::applyConversionPatterns(
 /// convert as many of the operations within 'fn' as possible given the set of
 /// patterns.
 LogicalResult
-mlir::applyConversionPatterns(Function fn, ConversionTarget &target,
+mlir::applyConversionPatterns(FuncOp fn, ConversionTarget &target,
                               OwningRewritePatternList &&patterns) {
   // Convert the body of this function.
   FunctionConverter converter(fn.getContext(), target, patterns);
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 830546db497..f78c941f923 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -771,7 +771,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
 }
 
 void DmaGeneration::runOnFunction() {
-  Function f = getFunction();
+  FuncOp f = getFunction();
   OpBuilder topBuilder(f.getBody());
   zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
 
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index b2557a6c6fd..ea1a03f09a3 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -150,7 +150,7 @@ static bool isMemRefDereferencingOp(Operation &op) {
 }
 
 // MemRefDependenceGraph is a graph data structure where graph nodes are
-// top-level operations in a Function which contain load/store ops, and edges
+// top-level operations in a FuncOp which contain load/store ops, and edges
 // are memref dependences between the nodes.
 // TODO(andydavis) Add a more flexible dependece graph representation.
 // TODO(andydavis) Add a depth parameter to dependence graph construction.
@@ -257,7 +257,7 @@ public:
 
   // Initializes the dependence graph based on operations in 'f'.
   // Returns true on success, false otherwise.
-  bool init(Function f);
+  bool init(FuncOp f);
 
   // Returns the graph node for 'id'.
   Node *getNode(unsigned id) {
@@ -637,7 +637,7 @@ public:
 // Assigns each node in the graph a node id based on program order in 'f'.
 // TODO(andydavis) Add support for taking a Block arg to construct the
 // dependence graph at a different depth.
-bool MemRefDependenceGraph::init(Function f) {
+bool MemRefDependenceGraph::init(FuncOp f) {
   DenseMap<Value *, SetVector<unsigned>> memrefAccesses;
 
   // TODO: support multi-block functions.
diff --git a/mlir/lib/Transforms/LoopParametricTiling.cpp b/mlir/lib/Transforms/LoopParametricTiling.cpp
index c2b23943794..77626f54a3c 100644
--- a/mlir/lib/Transforms/LoopParametricTiling.cpp
+++ b/mlir/lib/Transforms/LoopParametricTiling.cpp
@@ -43,7 +43,7 @@ public:
       : sizes(outerLoopSizes.begin(), outerLoopSizes.end()) {}
 
   void runOnFunction() override {
-    Function func = getFunction();
+    FuncOp func = getFunction();
 
    func.walk<ForOp>([this](ForOp op) {
      // Ignore nested loops.
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 2744e5ca05c..0a331cae100 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -261,7 +261,7 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
 // Identify valid and profitable bands of loops to tile. This is currently just
 // a temporary placeholder to test the mechanics of tiled code generation.
 // Returns all maximal outermost perfect loop nests to tile.
-static void getTileableBands(Function f,
+static void getTileableBands(FuncOp f,
                              std::vector<SmallVector<AffineForOp, 6>> *bands) {
   // Get maximal perfect nest of 'affine.for' insts starting from root
   // (inclusive).
diff --git a/mlir/lib/Transforms/LoopUnroll.cpp b/mlir/lib/Transforms/LoopUnroll.cpp
index 6f13f623fe8..1c7f3393ada 100644
--- a/mlir/lib/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Transforms/LoopUnroll.cpp
@@ -92,7 +92,7 @@ void LoopUnroll::runOnFunction() {
    // Store innermost loops as we walk.
    std::vector<AffineForOp> loops;
 
-    void walkPostOrder(Function f) {
+    void walkPostOrder(FuncOp f) {
      for (auto &b : f)
        walkPostOrder(b.begin(), b.end());
    }
@@ -142,7 +142,7 @@ void LoopUnroll::runOnFunction() {
                              ? clUnrollNumRepetitions
                              : 1;
   // If the call back is provided, we will recurse until no loops are found.
-  Function func = getFunction();
+  FuncOp func = getFunction();
   for (unsigned i = 0; i < numRepetitions || getUnrollFactor; i++) {
    InnermostLoopGatherer ilg;
    ilg.walkPostOrder(func);
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index df30e270fe6..2edf2a29386 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -726,7 +726,7 @@ public:
 
 } // end namespace
 
-LogicalResult mlir::lowerAffineConstructs(Function function) {
+LogicalResult mlir::lowerAffineConstructs(FuncOp function) {
   OwningRewritePatternList patterns;
   RewriteListBuilder<AffineApplyLowering, AffineDmaStartLowering,
                     AffineDmaWaitLowering, AffineLoadLowering,
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index fcac60c6a92..da8fc69fc0a 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -667,7 +667,7 @@ static bool emitSlice(MaterializationState *state,
 /// because we currently disallow vectorization of defs that come from another
 /// scope.
 /// TODO(ntv): please document return value.
-static bool materialize(Function f, const SetVector<Operation *> &terminators,
+static bool materialize(FuncOp f, const SetVector<Operation *> &terminators,
                        MaterializationState *state) {
   DenseSet<Operation *> seen;
   DominanceInfo domInfo(f);
@@ -731,7 +731,7 @@ void MaterializeVectorsPass::runOnFunction() {
   NestedPatternContext mlContext;
 
   // TODO(ntv): Check to see if this supports arbitrary top-level code.
-  Function f = getFunction();
+  FuncOp f = getFunction();
   if (f.getBlocks().size() != 1)
     return;
 
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 13a53e3a944..93f7331f7a3 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -213,7 +213,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
 
 void MemRefDataFlowOpt::runOnFunction() {
   // Only supports single block functions at the moment.
-  Function f = getFunction();
+  FuncOp f = getFunction();
   if (f.getBlocks().size() != 1) {
    markAllAnalysesPreserved();
    return;
diff --git a/mlir/lib/Transforms/StripDebugInfo.cpp b/mlir/lib/Transforms/StripDebugInfo.cpp
index c7c3621781a..c82354ed49e 100644
--- a/mlir/lib/Transforms/StripDebugInfo.cpp
+++ b/mlir/lib/Transforms/StripDebugInfo.cpp
@@ -29,7 +29,7 @@ struct StripDebugInfo : public FunctionPass<StripDebugInfo> {
 } // end anonymous namespace
 
 void StripDebugInfo::runOnFunction() {
-  Function func = getFunction();
+  FuncOp func = getFunction();
   auto unknownLoc = UnknownLoc::get(&getContext());
 
   // Strip the debug info from the function and its operations.
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index e185f702d27..c65370233da 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -44,7 +44,7 @@ namespace {
 /// applies the locally optimal patterns in a roughly "bottom up" way.
 class GreedyPatternRewriteDriver : public PatternRewriter {
 public:
-  explicit GreedyPatternRewriteDriver(Function fn,
+  explicit GreedyPatternRewriteDriver(FuncOp fn,
                                       OwningRewritePatternList &&patterns)
       : PatternRewriter(fn.getBody()), matcher(std::move(patterns)) {
    worklist.reserve(64);
@@ -213,7 +213,7 @@ bool GreedyPatternRewriteDriver::simplifyFunction(int maxIterations) {
 /// patterns in a greedy work-list driven manner. Return true if no more
 /// patterns can be matched in the result function.
 ///
-bool mlir::applyPatternsGreedily(Function fn,
+bool mlir::applyPatternsGreedily(FuncOp fn,
                                  OwningRewritePatternList &&patterns) {
   GreedyPatternRewriteDriver driver(fn, std::move(patterns));
   bool converged = driver.simplifyFunction(maxPatternMatchIterations);
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 5a0fb1f49fb..1f823391c3a 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -153,11 +153,11 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
   return success();
 }
 
-/// Promotes all single iteration for op's in the Function, i.e., moves
+/// Promotes all single iteration for op's in the FuncOp, i.e., moves
 /// their body into the containing Block.
-void mlir::promoteSingleIterationLoops(Function *f) {
+void mlir::promoteSingleIterationLoops(FuncOp f) {
   // Gathers all innermost loops through a post order pruned walk.
-  f->walk<AffineForOp>(
+  f.walk<AffineForOp>(
      [](AffineForOp forOp) { promoteIfSingleIteration(forOp); });
 }
 
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 4aff2ac4d13..43a6a2f7a82 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -1229,7 +1229,7 @@ static LogicalResult vectorizeRootMatch(NestedMatch m,
 /// Applies vectorization to the current Function by searching over a bunch of
 /// predetermined patterns.
 void Vectorize::runOnFunction() {
-  Function f = getFunction();
+  FuncOp f = getFunction();
   if (!fastestVaryingPattern.empty() &&
      fastestVaryingPattern.size() != vectorSizes.size()) {
    f.emitRemark("Fastest varying pattern specified with different size than "

