| | | |
|---|---|---|
| author | River Riddle <riverriddle@google.com> | 2018-10-30 14:59:22 -0700 |
| committer | jpienaar <jpienaar@google.com> | 2019-03-29 13:45:54 -0700 |
| commit | 4c465a181db49c436f62da303e8fdd3ed317fee7 (patch) | |
| tree | fb190912d0714222d6e336e19d5b8ea16342fb6e /mlir/lib/Transforms | |
| parent | 75376b8e33c67a42e3dca2c597197e0622b6eaa2 (diff) | |
Implement value type abstraction for types.
This is done by changing Type into a POD interface around an underlying pointer-storage object, and adding in-class support for isa/dyn_cast/cast.
PiperOrigin-RevId: 219372163
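For context before the diffstat and hunks below, here is a minimal, hypothetical sketch of the value-type pattern the commit message describes: a trivially copyable Type handle over uniqued pointer storage, with isa/dyn_cast/cast exposed as member templates. TypeStorage, the `kind` field, and MemRefLikeType are illustrative assumptions for exposition, not the actual MLIR classes in this revision.

```cpp
#include <cassert>

// Illustrative sketch only: TypeStorage, the `kind` tag, and MemRefLikeType
// are assumptions, not the exact MLIR classes touched by this commit.
struct TypeStorage {
  unsigned kind; // discriminator for the concrete type; storage is uniqued
};

class Type {
public:
  Type() = default;
  explicit Type(const TypeStorage *impl) : impl(impl) {}

  // Value semantics: a Type is a cheap, copyable handle compared by pointer
  // identity, rather than a Type * passed around by the caller.
  bool operator==(Type other) const { return impl == other.impl; }
  explicit operator bool() const { return impl != nullptr; }

  // In-class casting support, so call sites write t.isa<X>() / t.cast<X>()
  // on the value instead of isa<X>(ptr) / cast<X>(ptr) on a pointer.
  template <typename U> bool isa() const { return impl && U::classof(*this); }
  template <typename U> U dyn_cast() const { return isa<U>() ? U(impl) : U(); }
  template <typename U> U cast() const {
    assert(isa<U>() && "invalid cast");
    return U(impl);
  }

  const TypeStorage *getImpl() const { return impl; }

protected:
  const TypeStorage *impl = nullptr;
};

// A derived handle shares the same storage pointer; only its static classof
// differs, which is what isa/dyn_cast/cast above dispatch on.
class MemRefLikeType : public Type {
public:
  using Type::Type;
  static bool classof(Type t) { return t.getImpl()->kind == 1; }
};
```

With this shape, a Type can be passed and returned by value, which is the change the hunks below make to lambda signatures, SmallVector element types, and DenseMap keys.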
Diffstat (limited to 'mlir/lib/Transforms')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | mlir/lib/Transforms/ConstantFold.cpp | 6 |
| -rw-r--r-- | mlir/lib/Transforms/PipelineDataTransfer.cpp | 14 |
| -rw-r--r-- | mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp | 2 |
| -rw-r--r-- | mlir/lib/Transforms/Utils/Utils.cpp | 10 |
| -rw-r--r-- | mlir/lib/Transforms/Vectorize.cpp | 21 |
5 files changed, 27 insertions, 26 deletions
diff --git a/mlir/lib/Transforms/ConstantFold.cpp b/mlir/lib/Transforms/ConstantFold.cpp
index 81994ddfab4..15dd89bb758 100644
--- a/mlir/lib/Transforms/ConstantFold.cpp
+++ b/mlir/lib/Transforms/ConstantFold.cpp
@@ -31,7 +31,7 @@ struct ConstantFold : public FunctionPass, StmtWalker<ConstantFold> {
   SmallVector<SSAValue *, 8> existingConstants;
   // Operation statements that were folded and that need to be erased.
   std::vector<OperationStmt *> opStmtsToErase;
-  using ConstantFactoryType = std::function<SSAValue *(Attribute, Type *)>;
+  using ConstantFactoryType = std::function<SSAValue *(Attribute, Type)>;
 
   bool foldOperation(Operation *op,
                      SmallVectorImpl<SSAValue *> &existingConstants,
@@ -106,7 +106,7 @@ PassResult ConstantFold::runOnCFGFunction(CFGFunction *f) {
     for (auto instIt = bb.begin(), e = bb.end(); instIt != e;) {
       auto &inst = *instIt++;
 
-      auto constantFactory = [&](Attribute value, Type *type) -> SSAValue * {
+      auto constantFactory = [&](Attribute value, Type type) -> SSAValue * {
         builder.setInsertionPoint(&inst);
         return builder.create<ConstantOp>(inst.getLoc(), value, type);
       };
@@ -134,7 +134,7 @@ PassResult ConstantFold::runOnCFGFunction(CFGFunction *f) {
 
 // Override the walker's operation statement visit for constant folding.
 void ConstantFold::visitOperationStmt(OperationStmt *stmt) {
-  auto constantFactory = [&](Attribute value, Type *type) -> SSAValue * {
+  auto constantFactory = [&](Attribute value, Type type) -> SSAValue * {
     MLFuncBuilder builder(stmt);
     return builder.create<ConstantOp>(stmt->getLoc(), value, type);
   };
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index d96d65b5fb7..90421819d82 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -77,23 +77,23 @@ static bool doubleBuffer(const MLValue *oldMemRef, ForStmt *forStmt) {
   bInner.setInsertionPoint(forStmt, forStmt->begin());
 
   // Doubles the shape with a leading dimension extent of 2.
-  auto doubleShape = [&](MemRefType *oldMemRefType) -> MemRefType * {
+  auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
     // Add the leading dimension in the shape for the double buffer.
-    ArrayRef<int> shape = oldMemRefType->getShape();
+    ArrayRef<int> shape = oldMemRefType.getShape();
     SmallVector<int, 4> shapeSizes(shape.begin(), shape.end());
     shapeSizes.insert(shapeSizes.begin(), 2);
-    auto *newMemRefType =
-        bInner.getMemRefType(shapeSizes, oldMemRefType->getElementType(), {},
-                             oldMemRefType->getMemorySpace());
+    auto newMemRefType =
+        bInner.getMemRefType(shapeSizes, oldMemRefType.getElementType(), {},
+                             oldMemRefType.getMemorySpace());
     return newMemRefType;
   };
 
-  auto *newMemRefType = doubleShape(cast<MemRefType>(oldMemRef->getType()));
+  auto newMemRefType = doubleShape(oldMemRef->getType().cast<MemRefType>());
 
   // Create and place the alloc at the top level.
   MLFuncBuilder topBuilder(forStmt->getFunction());
-  auto *newMemRef = cast<MLValue>(
+  auto newMemRef = cast<MLValue>(
       topBuilder.create<AllocOp>(forStmt->getLoc(), newMemRefType)
           ->getResult());
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index cdf5b7166a0..4ec89425189 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -78,7 +78,7 @@ private:
   /// As part of canonicalization, we move constants to the top of the entry
   /// block of the current function and de-duplicate them. This keeps track of
   /// constants we have done this for.
-  DenseMap<std::pair<Attribute, Type *>, Operation *> uniquedConstants;
+  DenseMap<std::pair<Attribute, Type>, Operation *> uniquedConstants;
 };
 
 }; // end anonymous namespace
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index edd8ce85317..ad9d6dcb769 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -52,9 +52,9 @@ bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
                                     MLValue *newMemRef,
                                     ArrayRef<MLValue *> extraIndices,
                                     AffineMap indexRemap) {
-  unsigned newMemRefRank = cast<MemRefType>(newMemRef->getType())->getRank();
+  unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
   (void)newMemRefRank; // unused in opt mode
-  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef->getType())->getRank();
+  unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
   (void)newMemRefRank;
   if (indexRemap) {
     assert(indexRemap.getNumInputs() == oldMemRefRank);
@@ -64,8 +64,8 @@ bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
   }
 
   // Assert same elemental type.
-  assert(cast<MemRefType>(oldMemRef->getType())->getElementType() ==
-         cast<MemRefType>(newMemRef->getType())->getElementType());
+  assert(oldMemRef->getType().cast<MemRefType>().getElementType() ==
+         newMemRef->getType().cast<MemRefType>().getElementType());
 
   // Check if memref was used in a non-deferencing context.
   for (const StmtOperand &use : oldMemRef->getUses()) {
@@ -139,7 +139,7 @@ bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
                                     opStmt->operand_end());
 
   // Result types don't change. Both memref's are of the same elemental type.
-  SmallVector<Type *, 8> resultTypes;
+  SmallVector<Type, 8> resultTypes;
   resultTypes.reserve(opStmt->getNumResults());
   for (const auto *result : opStmt->getResults())
     resultTypes.push_back(result->getType());
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index d7a1f531cef..511afa95993 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -202,15 +202,15 @@ static bool analyzeProfitability(MLFunctionMatches matches,
 /// sizes specified by vectorSize. The MemRef lives in the same memory space as
 /// tmpl. The MemRef should be promoted to a closer memory address space in a
 /// later pass.
-static MemRefType *getVectorizedMemRefType(MemRefType *tmpl,
-                                           ArrayRef<int> vectorSizes) {
-  auto *elementType = tmpl->getElementType();
-  assert(!dyn_cast<VectorType>(elementType) &&
+static MemRefType getVectorizedMemRefType(MemRefType tmpl,
+                                          ArrayRef<int> vectorSizes) {
+  auto elementType = tmpl.getElementType();
+  assert(!elementType.dyn_cast<VectorType>() &&
          "Can't vectorize an already vector type");
-  assert(tmpl->getAffineMaps().empty() &&
+  assert(tmpl.getAffineMaps().empty() &&
          "Unsupported non-implicit identity map");
   return MemRefType::get({1}, VectorType::get(vectorSizes, elementType), {},
-                         tmpl->getMemorySpace());
+                         tmpl.getMemorySpace());
 }
 
 /// Creates an unaligned load with the following semantics:
@@ -258,7 +258,7 @@ static void createUnalignedLoad(MLFuncBuilder *b, Location *loc,
   operands.insert(operands.end(), dstMemRef);
   operands.insert(operands.end(), dstIndices.begin(), dstIndices.end());
   using functional::map;
-  std::function<Type *(SSAValue *)> getType = [](SSAValue *v) -> Type * {
+  std::function<Type(SSAValue *)> getType = [](SSAValue *v) -> Type {
     return v->getType();
   };
   auto types = map(getType, operands);
@@ -310,7 +310,7 @@ static void createUnalignedStore(MLFuncBuilder *b, Location *loc,
   operands.insert(operands.end(), dstMemRef);
   operands.insert(operands.end(), dstIndices.begin(), dstIndices.end());
   using functional::map;
-  std::function<Type *(SSAValue *)> getType = [](SSAValue *v) -> Type * {
+  std::function<Type(SSAValue *)> getType = [](SSAValue *v) -> Type {
     return v->getType();
   };
   auto types = map(getType, operands);
@@ -348,8 +348,9 @@ static std::function<ToType *(T *)> unwrapPtr() {
 template <typename LoadOrStoreOpPointer>
 static MLValue *materializeVector(MLValue *iv, LoadOrStoreOpPointer memoryOp,
                                   ArrayRef<int> vectorSize) {
-  auto *memRefType = cast<MemRefType>(memoryOp->getMemRef()->getType());
-  auto *vectorMemRefType = getVectorizedMemRefType(memRefType, vectorSize);
+  auto memRefType =
+      memoryOp->getMemRef()->getType().template cast<MemRefType>();
+  auto vectorMemRefType = getVectorizedMemRefType(memRefType, vectorSize);
 
   // Materialize a MemRef with 1 vector.
   auto *opStmt = cast<OperationStmt>(memoryOp->getOperation());
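All of the hunks above follow the same mechanical call-site migration. The comment-only summary below rearranges lines taken from the Utils.cpp and Vectorize.cpp hunks side by side; it is not standalone compilable code, since the "before" pointer-based API no longer exists after this commit.

```cpp
// Before this commit: Type and its subclasses were passed as pointers and
// cast with the free LLVM-style helpers.
//   unsigned rank = cast<MemRefType>(oldMemRef->getType())->getRank();
//   auto *elementType = tmpl->getElementType();
//   assert(!dyn_cast<VectorType>(elementType) && "...");
//
// After: Type is a value handle, and the casts are member templates on it.
//   unsigned rank = oldMemRef->getType().cast<MemRefType>().getRank();
//   auto elementType = tmpl.getElementType();
//   assert(!elementType.dyn_cast<VectorType>() && "...");
//
// In dependent (template) code, such as materializeVector above, the member
// template must be spelled with the `template` keyword:
//   auto memRefType =
//       memoryOp->getMemRef()->getType().template cast<MemRefType>();
```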

