summary | refs | log | tree | commit | diff | stats
path: root/mlir/lib
diff options
context:
space:
mode:
author    Uday Bondhugula <bondhugula@google.com>  2019-01-15 14:41:56 -0800
committer jpienaar <jpienaar@google.com>           2019-03-29 15:19:41 -0700
commit    03e15e1b9f84a7bed35ff4065ad7135b571d2d65 (patch)
tree      a5fad41c04d17071289216969017d2aa452e0304 /mlir/lib
parent    b7dbfd04ebd80577f38f4922e857ad821b908ff1 (diff)
download  bcm5719-llvm-03e15e1b9f84a7bed35ff4065ad7135b571d2d65.tar.gz
          bcm5719-llvm-03e15e1b9f84a7bed35ff4065ad7135b571d2d65.zip
Minor code cleanup - NFC.
- readability changes PiperOrigin-RevId: 229443430
Diffstat (limited to 'mlir/lib')
-rw-r--r--  mlir/lib/Analysis/AffineAnalysis.cpp          |  2
-rw-r--r--  mlir/lib/Analysis/Utils.cpp                   | 14
-rw-r--r--  mlir/lib/IR/BuiltinOps.cpp                    | 45
-rw-r--r--  mlir/lib/Transforms/DmaGeneration.cpp         |  7
-rw-r--r--  mlir/lib/Transforms/LoopFusion.cpp            |  8
-rw-r--r--  mlir/lib/Transforms/PipelineDataTransfer.cpp  | 10
-rw-r--r--  mlir/lib/Transforms/Utils/Utils.cpp           | 12
7 files changed, 46 insertions(+), 52 deletions(-)
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index cc0071d6b5d..19283b319b6 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -1535,7 +1535,7 @@ static void composeAffineMapAndOperands(AffineMap *map,
AffineNormalizer normalizer(*map, *operands);
auto normalizedMap = normalizer.getAffineMap();
auto normalizedOperands = normalizer.getOperands();
- canonicalizeMapAndOperands(normalizedMap, normalizedOperands);
+ canonicalizeMapAndOperands(&normalizedMap, &normalizedOperands);
*map = normalizedMap;
*operands = normalizedOperands;
assert(*map);
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 12ac0cc44ec..49e1e31f55d 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -112,22 +112,22 @@ Optional<int64_t> MemRefRegion::getBoundingConstantSizeAndShape(
// (dma_start, dma_wait).
bool mlir::getMemRefRegion(OperationInst *opInst, unsigned loopDepth,
MemRefRegion *region) {
- OpPointer<LoadOp> loadOp;
- OpPointer<StoreOp> storeOp;
unsigned rank;
SmallVector<Value *, 4> indices;
-
- if ((loadOp = opInst->dyn_cast<LoadOp>())) {
+ if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
rank = loadOp->getMemRefType().getRank();
+ indices.reserve(rank);
indices.append(loadOp->getIndices().begin(), loadOp->getIndices().end());
region->memref = loadOp->getMemRef();
region->setWrite(false);
- } else if ((storeOp = opInst->dyn_cast<StoreOp>())) {
+ } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
rank = storeOp->getMemRefType().getRank();
+ indices.reserve(rank);
indices.append(storeOp->getIndices().begin(), storeOp->getIndices().end());
region->memref = storeOp->getMemRef();
region->setWrite(true);
} else {
+ assert(false && "expected load or store op");
return false;
}
@@ -191,6 +191,7 @@ bool mlir::getMemRefRegion(OperationInst *opInst, unsigned loopDepth,
// this memref region is symbolic.
SmallVector<ForInst *, 4> outerIVs;
getLoopIVs(*opInst, &outerIVs);
+ assert(loopDepth <= outerIVs.size() && "invalid loop depth");
outerIVs.resize(loopDepth);
for (auto *operand : accessValueMap.getOperands()) {
ForInst *iv;
@@ -249,12 +250,13 @@ bool mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
static_assert(
std::is_same<LoadOrStoreOpPointer, OpPointer<LoadOp>>::value ||
std::is_same<LoadOrStoreOpPointer, OpPointer<StoreOp>>::value,
- "function argument should be either a LoadOp or a StoreOp");
+ "argument should be either a LoadOp or a StoreOp");
OperationInst *opInst = loadOrStoreOp->getInstruction();
MemRefRegion region;
if (!getMemRefRegion(opInst, /*loopDepth=*/0, &region))
return false;
+
LLVM_DEBUG(llvm::dbgs() << "Memory region");
LLVM_DEBUG(region.getConstraints()->dump());
diff --git a/mlir/lib/IR/BuiltinOps.cpp b/mlir/lib/IR/BuiltinOps.cpp
index 94fa58139af..da570f4b805 100644
--- a/mlir/lib/IR/BuiltinOps.cpp
+++ b/mlir/lib/IR/BuiltinOps.cpp
@@ -198,61 +198,62 @@ struct SimplifyAffineApplyState : public PatternState {
} // end anonymous namespace.
void mlir::canonicalizeMapAndOperands(
- AffineMap &map, llvm::SmallVectorImpl<Value *> &operands) {
- if (!map || operands.empty())
+ AffineMap *map, llvm::SmallVectorImpl<Value *> *operands) {
+ if (!map || operands->empty())
return;
- assert(map.getNumInputs() == operands.size() &&
+ assert(map->getNumInputs() == operands->size() &&
"map inputs must match number of operands");
// Check to see what dims are used.
- llvm::SmallBitVector usedDims(map.getNumDims());
- llvm::SmallBitVector usedSyms(map.getNumSymbols());
- map.walkExprs([&](AffineExpr expr) {
+ llvm::SmallBitVector usedDims(map->getNumDims());
+ llvm::SmallBitVector usedSyms(map->getNumSymbols());
+ map->walkExprs([&](AffineExpr expr) {
if (auto dimExpr = expr.dyn_cast<AffineDimExpr>())
usedDims[dimExpr.getPosition()] = true;
else if (auto symExpr = expr.dyn_cast<AffineSymbolExpr>())
usedSyms[symExpr.getPosition()] = true;
});
- auto *context = map.getContext();
+ auto *context = map->getContext();
SmallVector<Value *, 8> resultOperands;
- resultOperands.reserve(operands.size());
+ resultOperands.reserve(operands->size());
llvm::SmallDenseMap<Value *, AffineExpr, 8> seenDims;
- SmallVector<AffineExpr, 8> dimRemapping(map.getNumDims());
+ SmallVector<AffineExpr, 8> dimRemapping(map->getNumDims());
unsigned nextDim = 0;
- for (unsigned i = 0, e = map.getNumDims(); i != e; ++i) {
+ for (unsigned i = 0, e = map->getNumDims(); i != e; ++i) {
if (usedDims[i]) {
- auto it = seenDims.find(operands[i]);
+ auto it = seenDims.find((*operands)[i]);
if (it == seenDims.end()) {
dimRemapping[i] = getAffineDimExpr(nextDim++, context);
- resultOperands.push_back(operands[i]);
- seenDims.insert(std::make_pair(operands[i], dimRemapping[i]));
+ resultOperands.push_back((*operands)[i]);
+ seenDims.insert(std::make_pair((*operands)[i], dimRemapping[i]));
} else {
dimRemapping[i] = it->second;
}
}
}
llvm::SmallDenseMap<Value *, AffineExpr, 8> seenSymbols;
- SmallVector<AffineExpr, 8> symRemapping(map.getNumSymbols());
+ SmallVector<AffineExpr, 8> symRemapping(map->getNumSymbols());
unsigned nextSym = 0;
- for (unsigned i = 0, e = map.getNumSymbols(); i != e; ++i) {
+ for (unsigned i = 0, e = map->getNumSymbols(); i != e; ++i) {
if (usedSyms[i]) {
- auto it = seenSymbols.find(operands[i + map.getNumDims()]);
+ auto it = seenSymbols.find((*operands)[i + map->getNumDims()]);
if (it == seenSymbols.end()) {
symRemapping[i] = getAffineSymbolExpr(nextSym++, context);
- resultOperands.push_back(operands[i + map.getNumDims()]);
- seenSymbols.insert(
- std::make_pair(operands[i + map.getNumDims()], symRemapping[i]));
+ resultOperands.push_back((*operands)[i + map->getNumDims()]);
+ seenSymbols.insert(std::make_pair((*operands)[i + map->getNumDims()],
+ symRemapping[i]));
} else {
symRemapping[i] = it->second;
}
}
}
- map = map.replaceDimsAndSymbols(dimRemapping, symRemapping, nextDim, nextSym);
- operands = resultOperands;
+ *map =
+ map->replaceDimsAndSymbols(dimRemapping, symRemapping, nextDim, nextSym);
+ *operands = resultOperands;
}
PatternMatchResult SimplifyAffineApply::match(OperationInst *op) const {
@@ -262,7 +263,7 @@ PatternMatchResult SimplifyAffineApply::match(OperationInst *op) const {
AffineMap oldMap = map;
SmallVector<Value *, 8> resultOperands(apply->getOperands().begin(),
apply->getOperands().end());
- canonicalizeMapAndOperands(map, resultOperands);
+ canonicalizeMapAndOperands(&map, &resultOperands);
if (map != oldMap)
return matchSuccess(
std::make_unique<SimplifyAffineApplyState>(map, resultOperands));
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index e60f3531b62..df4aa84b039 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -223,13 +223,8 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, ForInst *forInst,
// on; this would correspond to loop IVs surrounding the level at which the
// DMA generation is being done.
const FlatAffineConstraints *cst = region.getConstraints();
- auto ids = cst->getIds();
SmallVector<Value *, 8> outerIVs;
- for (unsigned i = rank, e = ids.size(); i < e; i++) {
- auto id = cst->getIds()[i];
- assert(id.hasValue() && "Value id expected");
- outerIVs.push_back(id.getValue());
- }
+ cst->getIdValues(rank, cst->getNumIds(), &outerIVs);
// Construct the index expressions for the fast memory buffer. The index
// expression for a particular dimension of the fast buffer is obtained by
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index dffa292af3c..c097473de3f 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -694,12 +694,12 @@ static bool isFusionProfitable(FusionCandidate *candidate,
for (unsigned i = 0; i < numSrcLoopIVs; ++i) {
if (i < bestSrcLoopDepth) {
if (sliceState->lbs[i] != AffineMap::Null()) {
- canonicalizeMapAndOperands(sliceState->lbs[i],
- sliceState->lbOperands[i]);
+ canonicalizeMapAndOperands(&sliceState->lbs[i],
+ &sliceState->lbOperands[i]);
}
if (sliceState->ubs[i] != AffineMap::Null()) {
- canonicalizeMapAndOperands(sliceState->ubs[i],
- sliceState->ubOperands[i]);
+ canonicalizeMapAndOperands(&sliceState->ubs[i],
+ &sliceState->ubOperands[i]);
}
} else {
sliceState->lbs[i] = AffineMap::Null();
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 495c9c181fd..989af0071d7 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -87,12 +87,12 @@ static bool doubleBuffer(Value *oldMemRef, ForInst *forInst) {
// Doubles the shape with a leading dimension extent of 2.
auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
// Add the leading dimension in the shape for the double buffer.
- ArrayRef<int> shape = oldMemRefType.getShape();
- SmallVector<int, 4> shapeSizes(shape.begin(), shape.end());
- shapeSizes.insert(shapeSizes.begin(), 2);
-
+ ArrayRef<int> oldShape = oldMemRefType.getShape();
+ SmallVector<int, 4> newShape(1 + oldMemRefType.getRank());
+ newShape[0] = 2;
+ std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
auto newMemRefType =
- bInner.getMemRefType(shapeSizes, oldMemRefType.getElementType(), {},
+ bInner.getMemRefType(newShape, oldMemRefType.getElementType(), {},
oldMemRefType.getMemorySpace());
return newMemRefType;
};
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index f85847ff066..4f4aeabb26d 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -123,9 +123,6 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
FuncBuilder builder(opInst);
for (auto *extraIndex : extraIndices) {
- // TODO(mlir-team): An operation/SSA value should provide a method to
- // return the position of an SSA result in its defining
- // operation.
assert(extraIndex->getDefiningInst()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
assert((extraIndex->isValidDim() || extraIndex->isValidSymbol()) &&
@@ -137,7 +134,7 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
// provided. The indices of a memref come right after it, i.e.,
// at position memRefOperandPos + 1.
SmallVector<Value *, 4> remapOperands;
- remapOperands.reserve(oldMemRefRank + extraOperands.size());
+ remapOperands.reserve(extraOperands.size() + oldMemRefRank);
remapOperands.append(extraOperands.begin(), extraOperands.end());
remapOperands.append(opInst->operand_begin() + memRefOperandPos + 1,
opInst->operand_begin() + memRefOperandPos + 1 +
@@ -146,12 +143,11 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
auto remapOp = builder.create<AffineApplyOp>(opInst->getLoc(), indexRemap,
remapOperands);
// Remapped indices.
- for (auto *index : remapOp->getInstruction()->getResults())
- state.operands.push_back(index);
+ state.operands.append(remapOp->getInstruction()->result_begin(),
+ remapOp->getInstruction()->result_end());
} else {
// No remapping specified.
- for (auto *index : remapOperands)
- state.operands.push_back(index);
+ state.operands.append(remapOperands.begin(), remapOperands.end());
}
// Insert the remaining operands unmodified.
OpenPOWER on IntegriCloud