summaryrefslogtreecommitdiffstats
path: root/mlir/lib/Conversion/GPUToCUDA
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2019-07-10 15:49:27 -0700
committerjpienaar <jpienaar@google.com>2019-07-12 08:43:03 -0700
commit6da343ecfcb61493dbfae7b7a1577aa4ee83871a (patch)
tree943231e9d7caeed6f791723e1aaf8eb0ae6a7186 /mlir/lib/Conversion/GPUToCUDA
parent122cab677028e494616f40efdcb9bb43c0923ce4 (diff)
downloadbcm5719-llvm-6da343ecfcb61493dbfae7b7a1577aa4ee83871a.tar.gz
bcm5719-llvm-6da343ecfcb61493dbfae7b7a1577aa4ee83871a.zip
NFC: Replace Module::getNamedFunction with lookupSymbol<FuncOp>.
This allows for removing the last direct reference to FuncOp from ModuleOp. PiperOrigin-RevId: 257498296
Diffstat (limited to 'mlir/lib/Conversion/GPUToCUDA')
-rw-r--r--mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp22
-rw-r--r--mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp2
2 files changed, 12 insertions, 12 deletions
diff --git a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
index 80ddfa66d93..1cd4e494fa6 100644
--- a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
@@ -152,7 +152,7 @@ private:
void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
ModuleOp module = getModule();
Builder builder(module);
- if (!module.getNamedFunction(cuModuleLoadName)) {
+ if (!module.lookupSymbol<FuncOp>(cuModuleLoadName)) {
module.push_back(
FuncOp::create(loc, cuModuleLoadName,
builder.getFunctionType(
@@ -162,7 +162,7 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
},
getCUResultType())));
}
- if (!module.getNamedFunction(cuModuleGetFunctionName)) {
+ if (!module.lookupSymbol<FuncOp>(cuModuleGetFunctionName)) {
// The helper uses void* instead of CUDA's opaque CUmodule and
// CUfunction.
module.push_back(
@@ -175,7 +175,7 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
},
getCUResultType())));
}
- if (!module.getNamedFunction(cuLaunchKernelName)) {
+ if (!module.lookupSymbol<FuncOp>(cuLaunchKernelName)) {
// Other than the CUDA api, the wrappers use uintptr_t to match the
LLVM type of MLIR's index type, which the GPU dialect uses.
// Furthermore, they use void* instead of CUDA's opaque CUfunction and
@@ -198,14 +198,14 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
},
getCUResultType())));
}
- if (!module.getNamedFunction(cuGetStreamHelperName)) {
+ if (!module.lookupSymbol<FuncOp>(cuGetStreamHelperName)) {
// Helper function to get the current CUDA stream. Uses void* instead of
CUDA's opaque CUstream.
module.push_back(FuncOp::create(
loc, cuGetStreamHelperName,
builder.getFunctionType({}, getPointerType() /* void *stream */)));
}
- if (!module.getNamedFunction(cuStreamSynchronizeName)) {
+ if (!module.lookupSymbol<FuncOp>(cuStreamSynchronizeName)) {
module.push_back(
FuncOp::create(loc, cuStreamSynchronizeName,
builder.getFunctionType(
@@ -322,7 +322,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
// Emit a call to the cubin getter to retrieve a pointer to the data that
// represents the cubin at runtime.
// TODO(herhut): This should rather be a static global once supported.
- auto kernelFunction = getModule().getNamedFunction(launchOp.kernel());
+ auto kernelFunction = getModule().lookupSymbol<FuncOp>(launchOp.kernel());
auto cubinGetter =
kernelFunction.getAttrOfType<FunctionAttr>(kCubinGetterAnnotation);
if (!cubinGetter) {
@@ -335,7 +335,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
// Emit the load module call to load the module data. Error checking is done
// in the called helper function.
auto cuModule = allocatePointer(builder, loc);
- FuncOp cuModuleLoad = getModule().getNamedFunction(cuModuleLoadName);
+ FuncOp cuModuleLoad = getModule().lookupSymbol<FuncOp>(cuModuleLoadName);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
builder.getFunctionAttr(cuModuleLoad),
ArrayRef<Value *>{cuModule, data.getResult(0)});
@@ -346,19 +346,19 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
auto kernelName = generateKernelNameConstant(kernelFunction, loc, builder);
auto cuFunction = allocatePointer(builder, loc);
FuncOp cuModuleGetFunction =
- getModule().getNamedFunction(cuModuleGetFunctionName);
+ getModule().lookupSymbol<FuncOp>(cuModuleGetFunctionName);
builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getCUResultType()},
builder.getFunctionAttr(cuModuleGetFunction),
ArrayRef<Value *>{cuFunction, cuOwningModuleRef, kernelName});
// Grab the global stream needed for execution.
FuncOp cuGetStreamHelper =
- getModule().getNamedFunction(cuGetStreamHelperName);
+ getModule().lookupSymbol<FuncOp>(cuGetStreamHelperName);
auto cuStream = builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getPointerType()},
builder.getFunctionAttr(cuGetStreamHelper), ArrayRef<Value *>{});
// Invoke the function with required arguments.
- auto cuLaunchKernel = getModule().getNamedFunction(cuLaunchKernelName);
+ auto cuLaunchKernel = getModule().lookupSymbol<FuncOp>(cuLaunchKernelName);
auto cuFunctionRef =
builder.create<LLVM::LoadOp>(loc, getPointerType(), cuFunction);
auto paramsArray = setupParamsArray(launchOp, builder);
@@ -375,7 +375,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
paramsArray, /* kernel params */
nullpointer /* extra */});
// Sync on the stream to make it synchronous.
- auto cuStreamSync = getModule().getNamedFunction(cuStreamSynchronizeName);
+ auto cuStreamSync = getModule().lookupSymbol<FuncOp>(cuStreamSynchronizeName);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
builder.getFunctionAttr(cuStreamSync),
ArrayRef<Value *>(cuStream.getResult(0)));
diff --git a/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp b/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp
index e19e2de99fb..152624fcf2e 100644
--- a/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/GenerateCubinAccessors.cpp
@@ -60,7 +60,7 @@ private:
}
FuncOp getMallocHelper(Location loc, Builder &builder) {
- FuncOp result = getModule().getNamedFunction(kMallocHelperName);
+ FuncOp result = getModule().lookupSymbol<FuncOp>(kMallocHelperName);
if (!result) {
result = FuncOp::create(
loc, kMallocHelperName,
OpenPOWER on IntegriCloud