| author | Chris Lattner <clattner@google.com> | 2018-12-27 14:35:10 -0800 |
|---|---|---|
| committer | jpienaar <jpienaar@google.com> | 2019-03-29 14:40:06 -0700 |
| commit | 3f190312f8f7f09b5910bc77e80268402732ce6b | |
| tree | 1ac0989c889d04e1acb0370952ed3bf1f141e17d /mlir/lib/Transforms/LowerVectorTransfers.cpp | |
| parent | 776b035646d809d8b31662363e797f4d7f26c223 | |
Merge SSAValue, CFGValue, and MLValue together into a single Value class, which
is the new base of the SSA value hierarchy. This CL also standardizes all the
nomenclature and comments to use 'Value' where appropriate. This also eliminates a large number of cast<MLValue>(x)'s, which is very soothing.
This is step 11/n towards merging instructions and statements, NFC.
PiperOrigin-RevId: 227064624
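To make the cast elimination concrete, here is a minimal, self-contained C++ sketch of what the merge means at call sites. The `Value` and `Operation` definitions below are illustrative stand-ins, not MLIR's actual classes:

```cpp
#include <cassert>
#include <vector>

// Before this CL, SSAValue was the abstract base with CFGValue and MLValue
// as parallel subclasses, so ML-side passes wrote cast<MLValue>(x) at nearly
// every use site. After the merge, a single Value class is the base of the
// whole SSA value hierarchy and operands can be used directly.
class Value {
public:
  explicit Value(int type) : type(type) {}
  int getType() const { return type; }

private:
  int type;
};

// Illustrative operation holding its operands as plain Value pointers.
class Operation {
public:
  void addOperand(Value *v) { operands.push_back(v); }
  Value *getOperand(unsigned i) const { return operands[i]; }

private:
  std::vector<Value *> operands;
};

int main() {
  Value v(/*type=*/1);
  Operation op;
  op.addOperand(&v);
  // No cast<MLValue>(...) needed: the operand is already the right type.
  assert(op.getOperand(0)->getType() == 1);
}
```

The point is structural: once every SSA value shares one base class, ML-side code can consume operands without first casting down to MLValue.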
Diffstat (limited to 'mlir/lib/Transforms/LowerVectorTransfers.cpp')
| -rw-r--r-- | mlir/lib/Transforms/LowerVectorTransfers.cpp | 29 |
1 file changed, 13 insertions(+), 16 deletions(-)
```diff
diff --git a/mlir/lib/Transforms/LowerVectorTransfers.cpp b/mlir/lib/Transforms/LowerVectorTransfers.cpp
index fd07619a165..013b5080367 100644
--- a/mlir/lib/Transforms/LowerVectorTransfers.cpp
+++ b/mlir/lib/Transforms/LowerVectorTransfers.cpp
@@ -29,17 +29,14 @@
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/Location.h"
-#include "mlir/IR/MLValue.h"
 #include "mlir/IR/Matchers.h"
 #include "mlir/IR/OperationSupport.h"
 #include "mlir/IR/PatternMatch.h"
-#include "mlir/IR/SSAValue.h"
 #include "mlir/IR/Types.h"
 #include "mlir/Pass.h"
 #include "mlir/StandardOps/StandardOps.h"
 #include "mlir/SuperVectorOps/SuperVectorOps.h"
 #include "mlir/Support/Functional.h"
-#include "mlir/Support/LLVM.h"
 #include "mlir/Transforms/MLPatternLoweringPass.h"
 #include "mlir/Transforms/Passes.h"
@@ -62,26 +59,26 @@ using namespace mlir;
 #define DEBUG_TYPE "lower-vector-transfers"
 
-/// Creates the SSAValue for the sum of `a` and `b` without building a
+/// Creates the Value for the sum of `a` and `b` without building a
 /// full-fledged AffineMap for all indices.
 ///
 /// Prerequisites:
 /// `a` and `b` must be of IndexType.
-static SSAValue *add(MLFuncBuilder *b, Location loc, SSAValue *v, SSAValue *w) {
+static mlir::Value *add(MLFuncBuilder *b, Location loc, Value *v, Value *w) {
   assert(v->getType().isa<IndexType>() && "v must be of IndexType");
   assert(w->getType().isa<IndexType>() && "w must be of IndexType");
   auto *context = b->getContext();
   auto d0 = getAffineDimExpr(0, context);
   auto d1 = getAffineDimExpr(1, context);
   auto map = AffineMap::get(2, 0, {d0 + d1}, {});
-  return b->create<AffineApplyOp>(loc, map, ArrayRef<SSAValue *>{v, w})
+  return b->create<AffineApplyOp>(loc, map, ArrayRef<mlir::Value *>{v, w})
       ->getResult(0);
 }
 
 namespace {
 struct LowerVectorTransfersState : public MLFuncGlobalLoweringState {
   // Top of the function constant zero index.
-  SSAValue *zero;
+  Value *zero;
 };
 } // namespace
@@ -131,7 +128,8 @@ static void rewriteAsLoops(VectorTransferOpTy *transfer,
   // case of GPUs.
   if (std::is_same<VectorTransferOpTy, VectorTransferWriteOp>::value) {
     b.create<StoreOp>(vecView->getLoc(), transfer->getVector(),
-                      vecView->getResult(), ArrayRef<SSAValue *>{state->zero});
+                      vecView->getResult(),
+                      ArrayRef<mlir::Value *>{state->zero});
   }
 
   // 3. Emit the loop-nest.
@@ -140,7 +138,7 @@ static void rewriteAsLoops(VectorTransferOpTy *transfer,
   // TODO(ntv): Handle broadcast / slice properly.
   auto permutationMap = transfer->getPermutationMap();
   SetVector<ForStmt *> loops;
-  SmallVector<SSAValue *, 8> accessIndices(transfer->getIndices());
+  SmallVector<Value *, 8> accessIndices(transfer->getIndices());
   for (auto it : llvm::enumerate(transfer->getVectorType().getShape())) {
     auto composed = composeWithUnboundedMap(
         getAffineDimExpr(it.index(), b.getContext()), permutationMap);
@@ -168,17 +166,16 @@ static void rewriteAsLoops(VectorTransferOpTy *transfer,
     // b. write scalar to local.
     auto scalarLoad = b.create<LoadOp>(transfer->getLoc(),
                                        transfer->getMemRef(), accessIndices);
-    b.create<StoreOp>(
-        transfer->getLoc(), scalarLoad->getResult(),
-        tmpScalarAlloc->getResult(),
-        functional::map([](SSAValue *val) { return val; }, loops));
+    b.create<StoreOp>(transfer->getLoc(), scalarLoad->getResult(),
+                      tmpScalarAlloc->getResult(),
+                      functional::map([](Value *val) { return val; }, loops));
   } else {
     // VectorTransferWriteOp.
     // a. read scalar from local;
     // b. write scalar to remote.
     auto scalarLoad = b.create<LoadOp>(
         transfer->getLoc(), tmpScalarAlloc->getResult(),
-        functional::map([](SSAValue *val) { return val; }, loops));
+        functional::map([](Value *val) { return val; }, loops));
     b.create<StoreOp>(transfer->getLoc(), scalarLoad->getResult(),
                       transfer->getMemRef(), accessIndices);
   }
@@ -186,11 +183,11 @@ static void rewriteAsLoops(VectorTransferOpTy *transfer,
   // 5. Read the vector from local storage in case of a vector_transfer_read.
   // TODO(ntv): This vector_load operation should be further lowered in the
   // case of GPUs.
-  llvm::SmallVector<SSAValue *, 1> newResults = {};
+  llvm::SmallVector<Value *, 1> newResults = {};
   if (std::is_same<VectorTransferOpTy, VectorTransferReadOp>::value) {
     b.setInsertionPoint(cast<OperationStmt>(transfer->getOperation()));
     auto *vector = b.create<LoadOp>(transfer->getLoc(), vecView->getResult(),
-                                    ArrayRef<SSAValue *>{state->zero})
+                                    ArrayRef<Value *>{state->zero})
                        ->getResult();
     newResults.push_back(vector);
   }
```
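For intuition about what rewriteAsLoops emits for a vector_transfer_read, here is a hedged plain-C++ analogue of the same three steps (local scratch allocation, scalar copy loop, final vector load). `MemRef`, `Vector`, `transferRead`, and the sizes are hypothetical stand-ins, not MLIR APIs:

```cpp
#include <array>
#include <cstddef>
#include <numeric>

constexpr std::size_t kVectorSize = 8;

// Hypothetical stand-ins for a remote memref and a vector register.
using MemRef = std::array<float, 64>;
using Vector = std::array<float, kVectorSize>;

// Mirrors the vector_transfer_read path of rewriteAsLoops:
// remote scalar loads -> local scratch stores -> one vector load at the end.
Vector transferRead(const MemRef &remote, std::size_t base) {
  // 1. Allocate local scratch storage (tmpScalarAlloc in the pass).
  Vector local{};
  // 2. Scalar loop nest: load from remote (step a), store to local (step b).
  for (std::size_t i = 0; i < kVectorSize; ++i)
    local[i] = remote[base + i];
  // 3. Read the whole vector back from local storage (the final LoadOp).
  return local;
}

int main() {
  MemRef remote{};
  std::iota(remote.begin(), remote.end(), 0.0f);
  Vector v = transferRead(remote, /*base=*/16);
  return v[0] == 16.0f ? 0 : 1;
}
```

The write path is the mirror image: store the vector into local scratch first, then loop over scalars copying from local to the remote memref.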

