summary refs log tree commit diff stats
path: root/mlir/lib/Transforms
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2019-05-11 15:17:28 -0700
committerMehdi Amini <joker.eph@gmail.com>2019-05-20 13:37:10 -0700
commit02e03b9bf4a1fe60b89d4bd662895ebcc374129b (patch)
treef300b5e0886e01bd59c6fb7d7042b4e1fbf9ff3d /mlir/lib/Transforms
parent360f8a209e21b058cc20949fc8600817b0a1044c (diff)
download bcm5719-llvm-02e03b9bf4a1fe60b89d4bd662895ebcc374129b.tar.gz
download bcm5719-llvm-02e03b9bf4a1fe60b89d4bd662895ebcc374129b.zip
Add support for using llvm::dyn_cast/cast/isa for operation casts and replace usages of Operation::dyn_cast with llvm::dyn_cast.
-- PiperOrigin-RevId: 247778391
Diffstat (limited to 'mlir/lib/Transforms')
-rw-r--r--  mlir/lib/Transforms/DmaGeneration.cpp                    | 12
-rw-r--r--  mlir/lib/Transforms/LoopFusion.cpp                       |  8
-rw-r--r--  mlir/lib/Transforms/LoopTiling.cpp                       |  2
-rw-r--r--  mlir/lib/Transforms/LoopUnrollAndJam.cpp                 |  2
-rw-r--r--  mlir/lib/Transforms/LowerAffine.cpp                      |  4
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp               |  4
-rw-r--r--  mlir/lib/Transforms/MemRefDataFlowOpt.cpp                |  2
-rw-r--r--  mlir/lib/Transforms/PipelineDataTransfer.cpp             |  4
-rw-r--r--  mlir/lib/Transforms/TestConstantFold.cpp                 |  2
-rw-r--r--  mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp          |  2
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp                  |  2
-rw-r--r--  mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp |  2
-rw-r--r--  mlir/lib/Transforms/Vectorize.cpp                        |  8
13 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 10f47fe9be1..937399cc703 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -173,11 +173,11 @@ static void getMultiLevelStrides(const MemRefRegion &region,
static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
MemRefRegion *region) {
unsigned rank;
- if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
+ if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
rank = loadOp.getMemRefType().getRank();
region->memref = loadOp.getMemRef();
region->setWrite(false);
- } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
+ } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
rank = storeOp.getMemRefType().getRank();
region->memref = storeOp.getMemRef();
region->setWrite(true);
@@ -483,7 +483,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
});
for (auto it = curBegin; it != block->end(); ++it) {
- if (auto forOp = it->dyn_cast<AffineForOp>()) {
+ if (auto forOp = dyn_cast<AffineForOp>(&*it)) {
// Returns true if the footprint is known to exceed capacity.
auto exceedsCapacity = [&](AffineForOp forOp) {
Optional<int64_t> footprint =
@@ -607,10 +607,10 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
// Walk this range of operations to gather all memory regions.
block->walk(begin, end, [&](Operation *opInst) {
// Gather regions to allocate to buffers in faster memory space.
- if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
+ if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
return;
- } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
+ } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
return;
} else {
@@ -739,7 +739,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
// For a range of operations, a note will be emitted at the caller.
AffineForOp forOp;
uint64_t sizeInKib = llvm::divideCeil(totalDmaBuffersSizeInBytes, 1024);
- if (llvm::DebugFlag && (forOp = begin->dyn_cast<AffineForOp>())) {
+ if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
forOp.emitRemark()
<< sizeInKib
<< " KiB of DMA buffers in fast memory space for this block\n";
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 796d2164ad9..1c4a4d1f755 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -644,7 +644,7 @@ bool MemRefDependenceGraph::init(Function &f) {
DenseMap<Operation *, unsigned> forToNodeMap;
for (auto &op : f.front()) {
- if (auto forOp = op.dyn_cast<AffineForOp>()) {
+ if (auto forOp = dyn_cast<AffineForOp>(op)) {
// Create graph node 'id' to represent top-level 'forOp' and record
// all loads and store accesses it contains.
LoopNestStateCollector collector;
@@ -666,14 +666,14 @@ bool MemRefDependenceGraph::init(Function &f) {
}
forToNodeMap[&op] = node.id;
nodes.insert({node.id, node});
- } else if (auto loadOp = op.dyn_cast<LoadOp>()) {
+ } else if (auto loadOp = dyn_cast<LoadOp>(op)) {
// Create graph node for top-level load op.
Node node(nextNodeId++, &op);
node.loads.push_back(&op);
auto *memref = op.cast<LoadOp>().getMemRef();
memrefAccesses[memref].insert(node.id);
nodes.insert({node.id, node});
- } else if (auto storeOp = op.dyn_cast<StoreOp>()) {
+ } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
// Create graph node for top-level store op.
Node node(nextNodeId++, &op);
node.stores.push_back(&op);
@@ -2125,7 +2125,7 @@ public:
auto *fn = dstNode->op->getFunction();
for (unsigned i = 0, e = fn->getNumArguments(); i != e; ++i) {
for (auto &use : fn->getArgument(i)->getUses()) {
- if (auto loadOp = use.getOwner()->dyn_cast<LoadOp>()) {
+ if (auto loadOp = dyn_cast<LoadOp>(use.getOwner())) {
// Gather loops surrounding 'use'.
SmallVector<AffineForOp, 4> loops;
getLoopIVs(*use.getOwner(), &loops);
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index ce42a5eba85..28e13d89ada 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -273,7 +273,7 @@ static void getTileableBands(Function &f,
for (auto &block : f)
for (auto &op : block)
- if (auto forOp = op.dyn_cast<AffineForOp>())
+ if (auto forOp = dyn_cast<AffineForOp>(op))
getMaximalPerfectLoopNest(forOp);
}
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 366a7ede5eb..0a23295c8d9 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -92,7 +92,7 @@ void LoopUnrollAndJam::runOnFunction() {
// unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
// any for operation.
auto &entryBlock = getFunction().front();
- if (auto forOp = entryBlock.front().dyn_cast<AffineForOp>())
+ if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))
runOnAffineForOp(forOp);
}
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index dc389c8e37a..1ffe5e3ddd7 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -620,10 +620,10 @@ void LowerAffinePass::runOnFunction() {
// Rewrite all of the ifs and fors. We walked the operations in postorders,
// so we know that we will rewrite them in the reverse order.
for (auto *op : llvm::reverse(instsToRewrite)) {
- if (auto ifOp = op->dyn_cast<AffineIfOp>()) {
+ if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
if (lowerAffineIf(ifOp))
return signalPassFailure();
- } else if (auto forOp = op->dyn_cast<AffineForOp>()) {
+ } else if (auto forOp = dyn_cast<AffineForOp>(op)) {
if (lowerAffineFor(forOp))
return signalPassFailure();
} else if (lowerAffineApply(op->cast<AffineApplyOp>())) {
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 2f06a9aa3bf..28dfb2278e0 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -556,12 +556,12 @@ static bool instantiateMaterialization(Operation *op,
if (op->getNumRegions() != 0)
return op->emitError("NYI path Op with region"), true;
- if (auto write = op->dyn_cast<VectorTransferWriteOp>()) {
+ if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
auto *clone = instantiate(&b, write, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
return clone == nullptr;
}
- if (auto read = op->dyn_cast<VectorTransferReadOp>()) {
+ if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
auto *clone = instantiate(&b, read, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
if (!clone) {
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index a63d462c4a9..94df936c93f 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -103,7 +103,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
SmallVector<Operation *, 8> storeOps;
unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
for (auto &use : loadOp.getMemRef()->getUses()) {
- auto storeOp = use.getOwner()->dyn_cast<StoreOp>();
+ auto storeOp = dyn_cast<StoreOp>(use.getOwner());
if (!storeOp)
continue;
auto *storeOpInst = storeOp.getOperation();
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 66fbf4a1306..0da97f7d169 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -181,7 +181,7 @@ static void findMatchingStartFinishInsts(
// Collect outgoing DMA operations - needed to check for dependences below.
SmallVector<DmaStartOp, 4> outgoingDmaOps;
for (auto &op : *forOp.getBody()) {
- auto dmaStartOp = op.dyn_cast<DmaStartOp>();
+ auto dmaStartOp = dyn_cast<DmaStartOp>(op);
if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
outgoingDmaOps.push_back(dmaStartOp);
}
@@ -193,7 +193,7 @@ static void findMatchingStartFinishInsts(
dmaFinishInsts.push_back(&op);
continue;
}
- auto dmaStartOp = op.dyn_cast<DmaStartOp>();
+ auto dmaStartOp = dyn_cast<DmaStartOp>(op);
if (!dmaStartOp)
continue;
diff --git a/mlir/lib/Transforms/TestConstantFold.cpp b/mlir/lib/Transforms/TestConstantFold.cpp
index 0990d7a73f6..ec1e971973e 100644
--- a/mlir/lib/Transforms/TestConstantFold.cpp
+++ b/mlir/lib/Transforms/TestConstantFold.cpp
@@ -48,7 +48,7 @@ void TestConstantFold::foldOperation(Operation *op,
}
// If this op is a constant that are used and cannot be de-duplicated,
// remember it for cleanup later.
- else if (auto constant = op->dyn_cast<ConstantOp>()) {
+ else if (auto constant = dyn_cast<ConstantOp>(op)) {
existingConstants.push_back(op);
}
}
diff --git a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
index fc8209be872..b907840b27d 100644
--- a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
@@ -40,7 +40,7 @@ bool ConstantFoldHelper::tryToConstantFold(
// into the value it contains. We need to consider constants before the
// constant folding logic to avoid re-creating the same constant later.
// TODO: Extend to support dialect-specific constant ops.
- if (auto constant = op->dyn_cast<ConstantOp>()) {
+ if (auto constant = dyn_cast<ConstantOp>(op)) {
// If this constant is dead, update bookkeeping and signal the caller.
if (constant.use_empty()) {
notifyRemoval(op);
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index a10e4a1ae49..7fbb48ecf99 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -363,7 +363,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
nestedLoops.push_back(curr);
auto *currBody = curr.getBody();
while (currBody->begin() == std::prev(currBody->end(), 2) &&
- (curr = curr.getBody()->front().dyn_cast<AffineForOp>())) {
+ (curr = dyn_cast<AffineForOp>(curr.getBody()->front()))) {
nestedLoops.push_back(curr);
currBody = curr.getBody();
}
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 753f7cf750f..b64dc53e037 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -234,7 +234,7 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
- auto app = op.dyn_cast<AffineApplyOp>();
+ auto app = dyn_cast<AffineApplyOp>(op);
return app && app.use_empty();
}
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 025a6535a78..9b8768a6445 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -839,8 +839,8 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
for (auto ls : loadAndStoresMatches) {
auto *opInst = ls.getMatchedOperation();
- auto load = opInst->dyn_cast<LoadOp>();
- auto store = opInst->dyn_cast<StoreOp>();
+ auto load = dyn_cast<LoadOp>(opInst);
+ auto store = dyn_cast<StoreOp>(opInst);
LLVM_DEBUG(opInst->print(dbgs()));
LogicalResult result =
load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
@@ -982,7 +982,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
return nullptr;
}
// 3. vectorize constant.
- if (auto constant = operand->getDefiningOp()->dyn_cast<ConstantOp>()) {
+ if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) {
return vectorizeConstant(
op, constant,
VectorType::get(state->strategy->vectorSizes, operand->getType()));
@@ -1012,7 +1012,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
assert(!opInst->isa<VectorTransferWriteOp>() &&
"vector.transfer_write cannot be further vectorized");
- if (auto store = opInst->dyn_cast<StoreOp>()) {
+ if (auto store = dyn_cast<StoreOp>(opInst)) {
auto *memRef = store.getMemRef();
auto *value = store.getValueToStore();
auto *vectorValue = vectorizeOperand(value, opInst, state);
OpenPOWER on IntegriCloud