Diffstat (limited to 'mlir/lib/Conversion')
-rw-r--r--  mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp               | 10
-rw-r--r--  mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp                   | 32
-rw-r--r--  mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp  |  9
-rw-r--r--  mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp        |  4
4 files changed, 34 insertions, 21 deletions
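
All four files apply the same cleanup: helpers used only within their own .cpp file are given internal linkage, either by marking them static or by wrapping them in an anonymous namespace, while the publicly declared entry points (the populate*ConversionPatterns functions and LinalgTypeConverter::convertType) keep external linkage. A minimal sketch of the idiom, using hypothetical names that are not part of this patch:

  namespace {
  // Internal linkage: invisible outside this translation unit, so it
  // cannot collide with an identically named helper in another file.
  struct LocalHelper {
    int value = 0;
  };
  } // namespace

  // static at namespace scope has the same effect for a function.
  static int localCompute(int x) { return x + 1; }

  // Declared in a header, so it keeps external linkage and stays
  // outside the anonymous namespace.
  int publicEntryPoint() {
    LocalHelper h;
    return localCompute(h.value);
  }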
diff --git a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
index 52549933f9d..a88c9c493c5 100644
--- a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
+++ b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
@@ -70,6 +70,8 @@ using urem = ValueBuilder<mlir::LLVM::URemOp>;
using llvm_alloca = ValueBuilder<LLVM::AllocaOp>;
using llvm_return = OperationBuilder<LLVM::ReturnOp>;
+namespace {
+
template <typename T>
static LLVMType getPtrToElementType(T containerType,
LLVMTypeConverter &lowering) {
@@ -104,7 +106,6 @@ static Type convertLinalgType(Type t, LLVMTypeConverter &lowering) {
return Type();
}
-namespace {
/// EDSC-compatible wrapper for MemRefDescriptor.
class BaseViewConversionHelper {
public:
@@ -139,7 +140,6 @@ private:
MemRefDescriptor d;
};
-} // namespace
// RangeOp creates a new range descriptor.
class RangeOpConversion : public LLVMOpLowering {
@@ -421,12 +421,16 @@ static FlatSymbolRefAttr getLibraryCallSymbolRef(Operation *op,
return fnNameAttr;
}
+} // namespace
+
Type LinalgTypeConverter::convertType(Type t) {
if (auto result = LLVMTypeConverter::convertType(t))
return result;
return convertLinalgType(t, *this);
}
+namespace {
+
// LinalgOpConversion<LinalgOp> creates a new call to the
// `LinalgOp::getLibraryCallName()` function.
// The implementation of the function can be either in the same module or in an
@@ -552,6 +556,8 @@ populateLinalgToStandardConversionPatterns(OwningRewritePatternList &patterns,
ctx);
}
+} // namespace
+
/// Populate the given list with patterns that convert from Linalg to LLVM.
void mlir::populateLinalgToLLVMConversionPatterns(
LinalgTypeConverter &converter, OwningRewritePatternList &patterns,
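
Note the close/reopen of the anonymous namespace around the out-of-line definition of LinalgTypeConverter::convertType above. This is required by C++: a member function defined outside its class must appear in a namespace that encloses the class declaration, and an anonymous namespace in the .cpp file does not enclose a class declared in a header. A sketch of the constraint, with a hypothetical Converter standing in for LinalgTypeConverter:

  // In a header, at namespace scope:
  struct Converter {
    int convertType(int t);
  };

  // In the .cpp file:
  namespace {
  int normalize(int t) { return t < 0 ? -t : t; } // file-local helper
  } // namespace

  // Defining this inside the anonymous namespace would be ill-formed,
  // since that namespace does not enclose Converter's declaration;
  // hence the close/reopen pattern in the diff above.
  int Converter::convertType(int t) { return normalize(t); }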
diff --git a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
index 3ea1f85d62f..15633ac6d50 100644
--- a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
+++ b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
@@ -98,7 +98,7 @@ static Value getOrEmitUpperBound(ForOp forOp, OpBuilder &) {
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
template <typename OpTy>
-LogicalResult checkLoopNestMappableImpl(OpTy forOp, unsigned numDims) {
+static LogicalResult checkLoopNestMappableImpl(OpTy forOp, unsigned numDims) {
Region &limit = forOp.region();
for (unsigned i = 0, e = numDims; i < e; ++i) {
Operation *nested = &forOp.getBody()->front();
@@ -124,8 +124,8 @@ LogicalResult checkLoopNestMappableImpl(OpTy forOp, unsigned numDims) {
}
template <typename OpTy>
-LogicalResult checkLoopNestMappable(OpTy forOp, unsigned numBlockDims,
- unsigned numThreadDims) {
+static LogicalResult checkLoopNestMappable(OpTy forOp, unsigned numBlockDims,
+ unsigned numThreadDims) {
if (numBlockDims < 1 || numThreadDims < 1) {
LLVM_DEBUG(llvm::dbgs() << "nothing to map");
return success();
@@ -142,8 +142,8 @@ LogicalResult checkLoopNestMappable(OpTy forOp, unsigned numBlockDims,
}
template <typename OpTy>
-LogicalResult checkLoopOpMappable(OpTy forOp, unsigned numBlockDims,
- unsigned numThreadDims) {
+static LogicalResult checkLoopOpMappable(OpTy forOp, unsigned numBlockDims,
+ unsigned numThreadDims) {
if (numBlockDims < 1 || numThreadDims < 1) {
LLVM_DEBUG(llvm::dbgs() << "nothing to map");
return success();
@@ -265,8 +265,8 @@ Optional<OpTy> LoopToGpuConverter::collectBounds(OpTy forOp,
/// `nids`. The innermost loop is mapped to the x-dimension, followed by the
/// next innermost loop to y-dimension, followed by z-dimension.
template <typename OpTy>
-OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef<Value> ids,
- ArrayRef<Value> nids) {
+static OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef<Value> ids,
+ ArrayRef<Value> nids) {
auto nDims = ids.size();
assert(nDims == nids.size());
for (auto dim : llvm::seq<unsigned>(0, nDims)) {
@@ -285,9 +285,10 @@ OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef<Value> ids,
/// Utility method to convert the gpu::KernelDim3 object for representing id of
/// each workgroup/workitem and number of workgroup/workitems along a dimension
/// of the launch into a container.
-void packIdAndNumId(gpu::KernelDim3 kernelIds, gpu::KernelDim3 kernelNids,
- unsigned nDims, SmallVectorImpl<Value> &ids,
- SmallVectorImpl<Value> &nids) {
+static void packIdAndNumId(gpu::KernelDim3 kernelIds,
+ gpu::KernelDim3 kernelNids, unsigned nDims,
+ SmallVectorImpl<Value> &ids,
+ SmallVectorImpl<Value> &nids) {
assert(nDims <= 3 && "invalid number of launch dimensions");
SmallVector<Value, 3> allIds = {kernelIds.z, kernelIds.y, kernelIds.x};
SmallVector<Value, 3> allNids = {kernelNids.z, kernelNids.y, kernelNids.x};
@@ -300,9 +301,9 @@ void packIdAndNumId(gpu::KernelDim3 kernelIds, gpu::KernelDim3 kernelNids,
/// Generate the body of the launch operation.
template <typename OpTy>
-LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp,
- gpu::LaunchOp launchOp, unsigned numBlockDims,
- unsigned numThreadDims) {
+static LogicalResult
+createLaunchBody(OpBuilder &builder, OpTy rootForOp, gpu::LaunchOp launchOp,
+ unsigned numBlockDims, unsigned numThreadDims) {
OpBuilder::InsertionGuard bodyInsertionGuard(builder);
builder.setInsertionPointToEnd(&launchOp.body().front());
auto returnOp = builder.create<gpu::ReturnOp>(launchOp.getLoc());
@@ -337,8 +338,9 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp,
// Convert the computation rooted at the `rootForOp`, into a GPU kernel with the
// given workgroup size and number of workgroups.
template <typename OpTy>
-LogicalResult createLaunchFromOp(OpTy rootForOp, ArrayRef<Value> numWorkGroups,
- ArrayRef<Value> workGroupSizes) {
+static LogicalResult createLaunchFromOp(OpTy rootForOp,
+ ArrayRef<Value> numWorkGroups,
+ ArrayRef<Value> workGroupSizes) {
OpBuilder builder(rootForOp.getOperation());
if (numWorkGroups.size() > 3) {
return rootForOp.emitError("invalid ")
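
The LoopsToGPU helpers are function templates, and static works on those as well: each instantiation then has internal linkage, which is what the changes above rely on instead of an anonymous namespace. A small sketch, with a hypothetical clampIndex in place of the real helpers:

  // static on a function template gives every instantiation (for
  // example clampIndex<int> or clampIndex<long>) internal linkage
  // in this translation unit.
  template <typename T>
  static T clampIndex(T v, T limit) {
    return v < limit ? v : limit;
  }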
diff --git a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
index 7e7b7ef6ea6..cc535b938fb 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
@@ -139,10 +139,11 @@ public:
// TODO(ravishankarm) : This method assumes that the `origBaseType` is a
// MemRefType with AffineMap that has static strides. Handle dynamic strides
-spirv::AccessChainOp getElementPtr(OpBuilder &builder,
- SPIRVTypeConverter &typeConverter,
- Location loc, MemRefType origBaseType,
- Value basePtr, ArrayRef<Value> indices) {
+static spirv::AccessChainOp getElementPtr(OpBuilder &builder,
+ SPIRVTypeConverter &typeConverter,
+ Location loc, MemRefType origBaseType,
+ Value basePtr,
+ ArrayRef<Value> indices) {
// Get base and offset of the MemRefType and verify they are static.
int64_t offset;
SmallVector<int64_t, 4> strides;
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index cc6636df00e..8ec3212f7c0 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -34,6 +34,8 @@
using namespace mlir;
using namespace mlir::vector;
+namespace {
+
template <typename T>
static LLVM::LLVMType getPtrToElementType(T containerType,
LLVMTypeConverter &lowering) {
@@ -948,6 +950,8 @@ public:
}
};
+} // namespace
+
/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {