diff options
| author | River Riddle <riverriddle@google.com> | 2019-02-06 11:08:18 -0800 |
|---|---|---|
| committer | jpienaar <jpienaar@google.com> | 2019-03-29 16:17:29 -0700 |
| commit | 3227dee15dcfa0211e3ff3d1ef229d61a3c4e38f (patch) | |
| tree | c61f7f4841c198271dd9689d93e8df6f9866c0f9 /mlir/lib/Transforms | |
| parent | b9dde91ea6ec449ecd4203fb06fdf726001a0c37 (diff) | |
| download | bcm5719-llvm-3227dee15dcfa0211e3ff3d1ef229d61a3c4e38f.tar.gz bcm5719-llvm-3227dee15dcfa0211e3ff3d1ef229d61a3c4e38f.zip | |
NFC: Rename affine_apply to affine.apply. This is the first step to adding a namespace to the affine dialect.
PiperOrigin-RevId: 232707862
Diffstat (limited to 'mlir/lib/Transforms')
| -rw-r--r-- | mlir/lib/Transforms/DmaGeneration.cpp | 2 | ||||
| -rw-r--r-- | mlir/lib/Transforms/LowerAffine.cpp | 6 | ||||
| -rw-r--r-- | mlir/lib/Transforms/MaterializeVectors.cpp | 8 | ||||
| -rw-r--r-- | mlir/lib/Transforms/MemRefDataFlowOpt.cpp | 2 | ||||
| -rw-r--r-- | mlir/lib/Transforms/PipelineDataTransfer.cpp | 2 | ||||
| -rw-r--r-- | mlir/lib/Transforms/Utils/LoopUtils.cpp | 2 | ||||
| -rw-r--r-- | mlir/lib/Transforms/Utils/Utils.cpp | 14 |
7 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp index 7fd9128b358..855ff37f60f 100644 --- a/mlir/lib/Transforms/DmaGeneration.cpp +++ b/mlir/lib/Transforms/DmaGeneration.cpp @@ -407,7 +407,7 @@ bool DmaGeneration::generateDma(const MemRefRegion ®ion, Block *block, // access indices (subtracting out lower bound offsets for each dimension). // Ex: to replace load %A[%i, %j] with load %Abuf[%i - %iT, %j - %jT], // index remap will be (%i, %j) -> (%i - %iT, %j - %jT), - // i.e., affine_apply (d0, d1, d2, d3) -> (d2-d0, d3-d1) (%iT, %jT, %i, %j), + // i.e., affine.apply (d0, d1, d2, d3) -> (d2-d0, d3-d1) (%iT, %jT, %i, %j), // and (%iT, %jT) will be the 'extraOperands' for 'rep all memref uses with'. // d2, d3 correspond to the original indices (%i, %j). SmallVector<AffineExpr, 4> remapExprs; diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp index 88ccc90c18b..0d8eb8a4761 100644 --- a/mlir/lib/Transforms/LowerAffine.cpp +++ b/mlir/lib/Transforms/LowerAffine.cpp @@ -414,7 +414,7 @@ bool LowerAffinePass::lowerAffineFor(OpPointer<AffineForOp> forOp) { // +--------------------------------+ // | <code before the AffineIfOp> | // | %zero = constant 0 : index | -// | %v = affine_apply #expr1(%ops) | +// | %v = affine.apply #expr1(%ops) | // | %c = cmpi "sge" %v, %zero | // | cond_br %c, %next, %else | // +--------------------------------+ @@ -516,7 +516,7 @@ bool LowerAffinePass::lowerAffineIf(AffineIfOp *ifOp) { auto integerSet = ifOp->getIntegerSet(); // Implement short-circuit logic. For each affine expression in the 'if' - // condition, convert it into an affine map and call `affine_apply` to obtain + // condition, convert it into an affine map and call `affine.apply` to obtain // the resulting value. Perform the equality or the greater-than-or-equality // test between this value and zero depending on the equality flag of the // condition. 
If the test fails, jump immediately to the false branch, which @@ -573,7 +573,7 @@ bool LowerAffinePass::lowerAffineIf(AffineIfOp *ifOp) { return false; } -// Convert an "affine_apply" operation into a sequence of arithmetic +// Convert an "affine.apply" operation into a sequence of arithmetic // instructions using the StandardOps dialect. Return true on error. bool LowerAffinePass::lowerAffineApply(AffineApplyOp *op) { FuncBuilder builder(op->getInstruction()); diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp index f55c2154f08..be5a03bc416 100644 --- a/mlir/lib/Transforms/MaterializeVectors.cpp +++ b/mlir/lib/Transforms/MaterializeVectors.cpp @@ -124,25 +124,25 @@ /// for %i1 = 0 to %arg1 step 4 { /// for %i2 = 0 to %arg2 { /// for %i3 = 0 to %arg3 step 4 { -/// %1 = affine_apply (d0, d1, d2, d3) -> (d0, d1, d2, d3) +/// %1 = affine.apply (d0, d1, d2, d3) -> (d0, d1, d2, d3) /// (%i0, %i1, %i2, %i3) /// vector_transfer_write f1, %0, %1#0, %1#1, %1#2, %1#3 /// {permutation_map: (d0, d1, d2, d3) -> (d1, d0)} : /// vector<4x4xf32>, memref<?x?x?x?xf32>, /// index, index, index, index -/// %2 = affine_apply (d0, d1, d2, d3) -> (d0, d1, d2, d3 + 1) +/// %2 = affine.apply (d0, d1, d2, d3) -> (d0, d1, d2, d3 + 1) /// (%i0, %i1, %i2, %i3) /// vector_transfer_write {{.*}}, %0, %2#0, %2#1, %2#2, %2#3 /// {permutation_map: (d0, d1, d2, d3) -> (d1, d0)} : /// vector<4x4xf32>, memref<?x?x?x?xf32>, /// index, index, index, index -/// %3 = affine_apply (d0, d1, d2, d3) -> (d0, d1, d2, d3 + 2) +/// %3 = affine.apply (d0, d1, d2, d3) -> (d0, d1, d2, d3 + 2) /// (%i0, %i1, %i2, %i3) /// vector_transfer_write {{.*}}, %0, %3#0, %3#1, %3#2, %3#3 /// {permutation_map: (d0, d1, d2, d3) -> (d1, d0)} : /// vector<4x4xf32>, memref<?x?x?x?xf32>, /// index, index, index, index -/// %4 = affine_apply (d0, d1, d2, d3) -> (d0, d1, d2, d3 + 3) +/// %4 = affine.apply (d0, d1, d2, d3) -> (d0, d1, d2, d3 + 3) /// (%i0, %i1, %i2, %i3) /// 
vector_transfer_write {{.*}}, %0, %4#0, %4#1, %4#2, %4#3 /// {permutation_map: (d0, d1, d2, d3) -> (d1, d0)} : diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp index 9c9db30d163..d9f940a01f3 100644 --- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp +++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp @@ -147,7 +147,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(OpPointer<LoadOp> loadOp) { // common surrounding loop. As an example this filters out cases like: // for %i0 // for %i1 - // %idx = affine_apply (d0) -> (d0 + 1) (%i0) + // %idx = affine.apply (d0) -> (d0 + 1) (%i0) // store %A[%idx] // load %A[%i0] // diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp index 4ca48a53485..cfa045f2279 100644 --- a/mlir/lib/Transforms/PipelineDataTransfer.cpp +++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp @@ -324,7 +324,7 @@ PipelineDataTransfer::runOnAffineForOp(OpPointer<AffineForOp> forOp) { instShiftMap[sliceOp->getInstruction()] = 0; } } else { - // If a slice wasn't created, the reachable affine_apply op's from its + // If a slice wasn't created, the reachable affine.apply op's from its // operands are the ones that go with it. SmallVector<Instruction *, 4> affineApplyInsts; SmallVector<Value *, 4> operands(dmaStartInst->getOperands()); diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp index 95875adca6e..a1903ace026 100644 --- a/mlir/lib/Transforms/Utils/LoopUtils.cpp +++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp @@ -113,7 +113,7 @@ bool mlir::promoteIfSingleIteration(OpPointer<AffineForOp> forOp) { SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end()); FuncBuilder builder(forInst->getBlock(), Block::iterator(forInst)); if (lb.getMap() == builder.getDimIdentityMap()) { - // No need of generating an affine_apply. + // No need of generating an affine.apply. 
iv->replaceAllUsesWith(lbOperands[0]); } else { auto affineApplyOp = builder.create<AffineApplyOp>( diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp index 4b6147f447b..41689be52fc 100644 --- a/mlir/lib/Transforms/Utils/Utils.cpp +++ b/mlir/lib/Transforms/Utils/Utils.cpp @@ -195,25 +195,25 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef, /// Before /// /// for %i = 0 to #map(%N) -/// %idx = affine_apply (d0) -> (d0 mod 2) (%i) +/// %idx = affine.apply (d0) -> (d0 mod 2) (%i) /// "send"(%idx, %A, ...) /// "compute"(%idx) /// /// After /// /// for %i = 0 to #map(%N) -/// %idx = affine_apply (d0) -> (d0 mod 2) (%i) +/// %idx = affine.apply (d0) -> (d0 mod 2) (%i) /// "send"(%idx, %A, ...) -/// %idx_ = affine_apply (d0) -> (d0 mod 2) (%i) +/// %idx_ = affine.apply (d0) -> (d0 mod 2) (%i) /// "compute"(%idx_) /// /// This allows applying different transformations on send and compute (for eg. /// different shifts/delays). /// /// Returns nullptr either if none of opInst's operands were the result of an -/// affine_apply and thus there was no affine computation slice to create, or if -/// all the affine_apply op's supplying operands to this opInst did not have any -/// uses besides this opInst; otherwise returns the list of affine_apply +/// affine.apply and thus there was no affine computation slice to create, or if +/// all the affine.apply op's supplying operands to this opInst did not have any +/// uses besides this opInst; otherwise returns the list of affine.apply /// operations created in output argument `sliceOps`. void mlir::createAffineComputationSlice( Instruction *opInst, SmallVectorImpl<OpPointer<AffineApplyOp>> *sliceOps) { @@ -255,7 +255,7 @@ void mlir::createAffineComputationSlice( auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size()); fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands); - // Create an affine_apply for each of the map results. 
+ // Create an affine.apply for each of the map results. sliceOps->reserve(composedMap.getNumResults()); for (auto resultExpr : composedMap.getResults()) { auto singleResMap = builder.getAffineMap( |

