summaryrefslogtreecommitdiffstats
path: root/mlir/lib/Transforms/Utils
diff options
context:
space:
mode:
authorUday Bondhugula <udayb@iisc.ac.in>2019-09-03 12:13:59 -0700
committerA. Unique TensorFlower <gardener@tensorflow.org>2019-09-03 12:14:28 -0700
commit54d674f51e525e9186dc1f7d7df1b4d8e757e4ad (patch)
treecc2e6f987f9c5a110d7c163e3be7a10e73a79ffd /mlir/lib/Transforms/Utils
parent5593e005c676609c4b9bbf332fe894784d98d09f (diff)
downloadbcm5719-llvm-54d674f51e525e9186dc1f7d7df1b4d8e757e4ad.tar.gz
bcm5719-llvm-54d674f51e525e9186dc1f7d7df1b4d8e757e4ad.zip
Utility to normalize memrefs with non-identity layout maps
- introduce utility to convert memrefs with non-identity layout maps to ones with identity layout maps: convert the type and rewrite/remap all its uses - add this utility to -simplify-affine-structures pass for testing purposes Signed-off-by: Uday Bondhugula <uday@polymagelabs.com> Closes tensorflow/mlir#104 COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/mlir/pull/104 from bondhugula:memref-normalize f2c914aa1890e8860326c9e33f9aa160b3d65e6d PiperOrigin-RevId: 266985317
Diffstat (limited to 'mlir/lib/Transforms/Utils')
-rw-r--r--mlir/lib/Transforms/Utils/Utils.cpp79
1 file changed, 79 insertions, 0 deletions
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index b0c9b942352..d3442c0a542 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -388,3 +388,82 @@ void mlir::createAffineComputationSlice(
opInst->setOperand(idx, newOperands[idx]);
}
}
+
+// TODO: Currently works for static memrefs with single non-identity layout map.
+/// Rewrites the memref produced by `allocOp` to use an identity layout map.
+/// The original (single, non-identity) layout map is treated as the mapping
+/// from the logical index space to the physical one: the physical shape is
+/// derived by composing the map with the constant bounds of the logical space,
+/// a new identity-layout alloc of that shape is created, and all uses of the
+/// old memref are rewritten with the old layout map as the index remapping.
+/// Returns failure — leaving the IR unchanged except for no-op builder state —
+/// when the memref is unsupported (multiple layout maps, dynamic dimensions,
+/// a map with fewer results than the rank, or one mapping into a negative
+/// space) or when some use could not be rewritten (e.g. the memref escapes).
+LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
+  MemRefType memrefType = allocOp.getType();
+  unsigned rank = memrefType.getRank();
+  // Rank-0 memrefs have no indices to remap; trivially done.
+  if (rank == 0)
+    return success();
+
+  auto layoutMaps = memrefType.getAffineMaps();
+  OpBuilder b(allocOp);
+  // Only a single layout map is handled (see TODO above).
+  if (layoutMaps.size() != 1)
+    return failure();
+
+  AffineMap layoutMap = layoutMaps.front();
+
+  // Nothing to normalize if the layout is already the identity.
+  if (layoutMap == b.getMultiDimIdentityMap(rank))
+    return success();
+
+  if (layoutMap.getNumResults() < rank)
+    // This is a sufficient condition for not being one-to-one; the map is thus
+    // invalid. Leave it alone. (Undefined behavior?)
+    return failure();
+
+  // We don't do any more non-trivial checks for one-to-one'ness; we
+  // assume that it is one-to-one.
+
+  // TODO: Only for static memref's for now.
+  if (memrefType.getNumDynamicDims() > 0)
+    return failure();
+
+  // We have a single map that is not an identity map. Create a new memref with
+  // the right shape and an identity layout map.
+  // Build the logical index space as a constraint system: 0 <= d < shape[d]
+  // for each dimension (valid because all dims are static at this point).
+  auto shape = memrefType.getShape();
+  FlatAffineConstraints fac(rank, 0);
+  for (unsigned d = 0; d < rank; ++d) {
+    fac.addConstantLowerBound(d, 0);
+    fac.addConstantUpperBound(d, shape[d] - 1);
+  }
+
+  // We compose this map with the original index (logical) space to derive the
+  // upper bounds for the new index space.
+  unsigned newRank = layoutMap.getNumResults();
+  fac.composeMatchingMap(layoutMap);
+  // Project out the old data dimensions.
+  fac.projectOut(newRank, fac.getNumIds() - newRank - fac.getNumLocalIds());
+  SmallVector<int64_t, 4> newShape(newRank);
+  for (unsigned d = 0; d < newRank; ++d) {
+    // The lower bound for the shape is always zero.
+    auto ubConst = fac.getConstantUpperBound(d);
+    // For a static memref and an affine map with no symbols, this is always
+    // bounded.
+    assert(ubConst.hasValue() && "should always have an upper bound");
+    if (ubConst.getValue() < 0)
+      // This is due to an invalid map that maps to a negative space.
+      return failure();
+    // Upper bound is inclusive, so the extent is ub + 1.
+    newShape[d] = ubConst.getValue() + 1;
+  }
+
+  // Create the replacement alloc: same element type, remapped (physical)
+  // shape, identity layout.
+  auto *oldMemRef = allocOp.getResult();
+  auto newMemRefType = b.getMemRefType(newShape, memrefType.getElementType(),
+                                       b.getMultiDimIdentityMap(newRank));
+  auto newAlloc = b.create<AllocOp>(allocOp.getLoc(), newMemRefType);
+
+  // Replace all uses of the old memref, remapping each access's indices
+  // through the old layout map so accesses now address the physical space.
+  if (failed(replaceAllMemRefUsesWith(oldMemRef, /*newMemRef=*/newAlloc,
+                                      /*extraIndices=*/{},
+                                      /*indexRemap=*/layoutMap))) {
+    // If it failed (due to escapes for example), bail out: erase the freshly
+    // created alloc so we leave no dead op behind.
+    newAlloc.erase();
+    return failure();
+  }
+  // Replace any uses of the original alloc op and erase it. All remaining uses
+  // have to be dealloc's; replaceAllMemRefUsesWith above would've failed
+  // otherwise.
+  assert(std::all_of(oldMemRef->user_begin(), oldMemRef->user_end(),
+                     [](Operation *op) { return isa<DeallocOp>(op); }));
+  oldMemRef->replaceAllUsesWith(newAlloc);
+  allocOp.erase();
+  return success();
+}
OpenPOWER on IntegriCloud