summaryrefslogtreecommitdiffstats
path: root/mlir/lib/Transforms/Utils
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2020-01-11 08:54:04 -0800
committerRiver Riddle <riverriddle@google.com>2020-01-11 08:54:39 -0800
commit2bdf33cc4c733342fc83081bc7410ac5e9a24f55 (patch)
tree3306d769c2bbabda1060928e0cea79d021ea9da2 /mlir/lib/Transforms/Utils
parent1d641daf260308815d014d1bf1b424a1ed1e7277 (diff)
downloadbcm5719-llvm-2bdf33cc4c733342fc83081bc7410ac5e9a24f55.tar.gz
bcm5719-llvm-2bdf33cc4c733342fc83081bc7410ac5e9a24f55.zip
[mlir] NFC: Remove Value::operator* and Value::operator-> now that Value is properly value-typed.
Summary: These were temporary methods used to simplify the transition.

Reviewed By: antiagainst

Differential Revision: https://reviews.llvm.org/D72548
Diffstat (limited to 'mlir/lib/Transforms/Utils')
-rw-r--r--mlir/lib/Transforms/Utils/FoldUtils.cpp6
-rw-r--r--mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp8
-rw-r--r--mlir/lib/Transforms/Utils/InliningUtils.cpp20
-rw-r--r--mlir/lib/Transforms/Utils/LoopFusionUtils.cpp4
-rw-r--r--mlir/lib/Transforms/Utils/LoopUtils.cpp37
-rw-r--r--mlir/lib/Transforms/Utils/RegionUtils.cpp6
-rw-r--r--mlir/lib/Transforms/Utils/Utils.cpp36
7 files changed, 58 insertions, 59 deletions
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index 719c6fac731..a96545c0b24 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -97,7 +97,7 @@ LogicalResult OperationFolder::tryToFold(
// Otherwise, replace all of the result values and erase the operation.
for (unsigned i = 0, e = results.size(); i != e; ++i)
- op->getResult(i)->replaceAllUsesWith(results[i]);
+ op->getResult(i).replaceAllUsesWith(results[i]);
op->erase();
return success();
}
@@ -120,7 +120,7 @@ void OperationFolder::notifyRemoval(Operation *op) {
auto &uniquedConstants = foldScopes[getInsertionRegion(interfaces, op)];
// Erase all of the references to this operation.
- auto type = op->getResult(0)->getType();
+ auto type = op->getResult(0).getType();
for (auto *dialect : it->second)
uniquedConstants.erase(std::make_tuple(dialect, constValue, type));
referencedDialects.erase(it);
@@ -182,7 +182,7 @@ LogicalResult OperationFolder::tryToFold(
Attribute attrRepl = foldResults[i].get<Attribute>();
if (auto *constOp =
tryGetOrCreateConstant(uniquedConstants, dialect, builder, attrRepl,
- res->getType(), op->getLoc())) {
+ res.getType(), op->getLoc())) {
results.push_back(constOp->getResult(0));
continue;
}
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index 1eb9c57639a..24e1f5eabd7 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -99,7 +99,7 @@ protected:
// before the root is changed.
void notifyRootReplaced(Operation *op) override {
for (auto result : op->getResults())
- for (auto *user : result->getUsers())
+ for (auto *user : result.getUsers())
addToWorklist(user);
}
@@ -115,9 +115,9 @@ private:
// TODO(riverriddle) This is based on the fact that zero use operations
// may be deleted, and that single use values often have more
// canonicalization opportunities.
- if (!operand->use_empty() && !operand->hasOneUse())
+ if (!operand.use_empty() && !operand.hasOneUse())
continue;
- if (auto *defInst = operand->getDefiningOp())
+ if (auto *defInst = operand.getDefiningOp())
addToWorklist(defInst);
}
}
@@ -181,7 +181,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef<Region> regions,
// Add all the users of the result to the worklist so we make sure
// to revisit them.
for (auto result : op->getResults())
- for (auto *operand : result->getUsers())
+ for (auto *operand : result.getUsers())
addToWorklist(operand);
notifyOperationRemoved(op);
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
index 1ac286c67fb..64591209dce 100644
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -199,8 +199,8 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
// Otherwise, there were multiple blocks inlined. Add arguments to the post
// insertion block to represent the results to replace.
for (Value resultToRepl : resultsToReplace) {
- resultToRepl->replaceAllUsesWith(
- postInsertBlock->addArgument(resultToRepl->getType()));
+ resultToRepl.replaceAllUsesWith(
+ postInsertBlock->addArgument(resultToRepl.getType()));
}
/// Handle the terminators for each of the new blocks.
@@ -238,7 +238,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
// Verify that the types of the provided values match the function argument
// types.
BlockArgument regionArg = entryBlock->getArgument(i);
- if (inlinedOperands[i]->getType() != regionArg->getType())
+ if (inlinedOperands[i].getType() != regionArg.getType())
return failure();
mapper.map(regionArg, inlinedOperands[i]);
}
@@ -302,7 +302,7 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Functor used to cleanup generated state on failure.
auto cleanupState = [&] {
for (auto *op : castOps) {
- op->getResult(0)->replaceAllUsesWith(op->getOperand(0));
+ op->getResult(0).replaceAllUsesWith(op->getOperand(0));
op->erase();
}
return failure();
@@ -321,8 +321,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// If the call operand doesn't match the expected region argument, try to
// generate a cast.
- Type regionArgType = regionArg->getType();
- if (operand->getType() != regionArgType) {
+ Type regionArgType = regionArg.getType();
+ if (operand.getType() != regionArgType) {
if (!(operand = materializeConversion(callInterface, castOps, castBuilder,
operand, regionArgType, castLoc)))
return cleanupState();
@@ -334,18 +334,18 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
castBuilder.setInsertionPointAfter(call);
for (unsigned i = 0, e = callResults.size(); i != e; ++i) {
Value callResult = callResults[i];
- if (callResult->getType() == callableResultTypes[i])
+ if (callResult.getType() == callableResultTypes[i])
continue;
// Generate a conversion that will produce the original type, so that the IR
// is still valid after the original call gets replaced.
Value castResult =
materializeConversion(callInterface, castOps, castBuilder, callResult,
- callResult->getType(), castLoc);
+ callResult.getType(), castLoc);
if (!castResult)
return cleanupState();
- callResult->replaceAllUsesWith(castResult);
- castResult->getDefiningOp()->replaceUsesOfWith(castResult, callResult);
+ callResult.replaceAllUsesWith(castResult);
+ castResult.getDefiningOp()->replaceUsesOfWith(castResult, callResult);
}
// Attempt to inline the call.
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index b0d9fdf5fd8..21603113ec9 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -113,7 +113,7 @@ static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
return WalkResult::advance();
}
for (auto value : op->getResults()) {
- for (auto user : value->getUsers()) {
+ for (auto user : value.getUsers()) {
SmallVector<AffineForOp, 4> loops;
// Check if any loop in loop nest surrounding 'user' is 'opB'.
getLoopIVs(*user, &loops);
@@ -447,7 +447,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
// Subtract out any load users of 'storeMemrefs' nested below
// 'insertPointParent'.
for (auto value : storeMemrefs) {
- for (auto *user : value->getUsers()) {
+ for (auto *user : value.getUsers()) {
if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
SmallVector<AffineForOp, 4> loops;
// Check if any loop in loop nest surrounding 'user' is
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 0fece54132a..9d7e7cddb05 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -97,9 +97,8 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
canonicalizeMapAndOperands(map, operands);
// Remove any affine.apply's that became dead from the simplification above.
for (auto v : bumpValues) {
- if (v->use_empty()) {
- v->getDefiningOp()->erase();
- }
+ if (v.use_empty())
+ v.getDefiningOp()->erase();
}
if (lb.use_empty())
lb.erase();
@@ -120,23 +119,23 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
// Replaces all IV uses to its single iteration value.
auto iv = forOp.getInductionVar();
Operation *op = forOp.getOperation();
- if (!iv->use_empty()) {
+ if (!iv.use_empty()) {
if (forOp.hasConstantLowerBound()) {
OpBuilder topBuilder(op->getParentOfType<FuncOp>().getBody());
auto constOp = topBuilder.create<ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
- iv->replaceAllUsesWith(constOp);
+ iv.replaceAllUsesWith(constOp);
} else {
AffineBound lb = forOp.getLowerBound();
SmallVector<Value, 4> lbOperands(lb.operand_begin(), lb.operand_end());
OpBuilder builder(op->getBlock(), Block::iterator(op));
if (lb.getMap() == builder.getDimIdentityMap()) {
// No need of generating an affine.apply.
- iv->replaceAllUsesWith(lbOperands[0]);
+ iv.replaceAllUsesWith(lbOperands[0]);
} else {
auto affineApplyOp = builder.create<AffineApplyOp>(
op->getLoc(), lb.getMap(), lbOperands);
- iv->replaceAllUsesWith(affineApplyOp);
+ iv.replaceAllUsesWith(affineApplyOp);
}
}
}
@@ -192,7 +191,7 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
// remapped to results of cloned operations, and their IV used remapped.
// Generate the remapping if the shift is not zero: remappedIV = newIV -
// shift.
- if (!srcIV->use_empty() && shift != 0) {
+ if (!srcIV.use_empty() && shift != 0) {
auto ivRemap = bodyBuilder.create<AffineApplyOp>(
srcForInst.getLoc(),
bodyBuilder.getSingleDimShiftAffineMap(
@@ -474,7 +473,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
// If the induction variable is used, create a remapping to the value for
// this unrolled instance.
- if (!forOpIV->use_empty()) {
+ if (!forOpIV.use_empty()) {
// iv' = iv + 1/2/3...unrollFactor-1;
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = AffineMap::get(1, 0, {d0 + i * step});
@@ -835,7 +834,7 @@ Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value> sizes) {
static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend,
int64_t divisor) {
assert(divisor > 0 && "expected positive divisor");
- assert(dividend->getType().isIndex() && "expected index-typed value");
+ assert(dividend.getType().isIndex() && "expected index-typed value");
Value divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1);
Value divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
@@ -849,7 +848,7 @@ static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend,
// where divis is rounding-to-zero division.
static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend,
Value divisor) {
- assert(dividend->getType().isIndex() && "expected index-typed value");
+ assert(dividend.getType().isIndex() && "expected index-typed value");
Value cstOne = builder.create<ConstantIndexOp>(loc, 1);
Value divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
@@ -968,7 +967,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
static void
replaceAllUsesExcept(Value orig, Value replacement,
const SmallPtrSetImpl<Operation *> &exceptions) {
- for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
if (exceptions.count(use.getOwner()) == 0)
use.set(replacement);
}
@@ -992,12 +991,12 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer,
// a constant one step.
bool isZeroBased = false;
if (auto ubCst =
- dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound()->getDefiningOp()))
+ dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound().getDefiningOp()))
isZeroBased = ubCst.getValue() == 0;
bool isStepOne = false;
if (auto stepCst =
- dyn_cast_or_null<ConstantIndexOp>(loop.step()->getDefiningOp()))
+ dyn_cast_or_null<ConstantIndexOp>(loop.step().getDefiningOp()))
isStepOne = stepCst.getValue() == 1;
if (isZeroBased && isStepOne)
@@ -1034,8 +1033,8 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer,
Value shifted =
isZeroBased ? scaled : builder.create<AddIOp>(loc, scaled, lb);
- SmallPtrSet<Operation *, 2> preserve{scaled->getDefiningOp(),
- shifted->getDefiningOp()};
+ SmallPtrSet<Operation *, 2> preserve{scaled.getDefiningOp(),
+ shifted.getDefiningOp()};
replaceAllUsesExcept(loop.getInductionVar(), shifted, preserve);
}
@@ -1175,7 +1174,7 @@ static void getMultiLevelStrides(const MemRefRegion &region,
int64_t numEltPerStride = 1;
int64_t stride = 1;
for (int d = bufferShape.size() - 1; d >= 1; d--) {
- int64_t dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
+ int64_t dimSize = region.memref.getType().cast<MemRefType>().getDimSize(d);
stride *= dimSize;
numEltPerStride *= bufferShape[d];
// A stride is needed only if the region has a shorter extent than the
@@ -1295,7 +1294,7 @@ static LogicalResult generateCopy(
auto loc = region.loc;
auto memref = region.memref;
- auto memRefType = memref->getType().cast<MemRefType>();
+ auto memRefType = memref.getType().cast<MemRefType>();
auto layoutMaps = memRefType.getAffineMaps();
if (layoutMaps.size() > 1 ||
@@ -1560,7 +1559,7 @@ static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
assert(false && "expected load or store op");
return false;
}
- auto memRefType = region->memref->getType().cast<MemRefType>();
+ auto memRefType = region->memref.getType().cast<MemRefType>();
if (!memRefType.hasStaticShape())
return false;
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index ca26074f288..197f608f82c 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -20,7 +20,7 @@ using namespace mlir;
void mlir::replaceAllUsesInRegionWith(Value orig, Value replacement,
Region &region) {
- for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
if (region.isAncestor(use.getOwner()->getParentRegion()))
use.set(replacement);
}
@@ -42,7 +42,7 @@ void mlir::visitUsedValuesDefinedAbove(
region.walk([callback, &properAncestors](Operation *op) {
for (OpOperand &operand : op->getOpOperands())
// Callback on values defined in a proper ancestor of region.
- if (properAncestors.count(operand.get()->getParentRegion()))
+ if (properAncestors.count(operand.get().getParentRegion()))
callback(&operand);
});
}
@@ -180,7 +180,7 @@ static bool isUseSpeciallyKnownDead(OpOperand &use, LiveMap &liveMap) {
}
static void processValue(Value value, LiveMap &liveMap) {
- bool provedLive = llvm::any_of(value->getUses(), [&](OpOperand &use) {
+ bool provedLive = llvm::any_of(value.getUses(), [&](OpOperand &use) {
if (isUseSpeciallyKnownDead(use, liveMap))
return false;
return liveMap.wasProvenLive(use.getOwner());
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index a6629183dee..8dbccd1a72d 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -52,9 +52,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
AffineMap indexRemap,
ArrayRef<Value> extraOperands,
ArrayRef<Value> symbolOperands) {
- unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
+ unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
- unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
+ unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank();
(void)oldMemRefRank; // unused in opt mode
if (indexRemap) {
assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
@@ -67,8 +67,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
}
// Assert same elemental type.
- assert(oldMemRef->getType().cast<MemRefType>().getElementType() ==
- newMemRef->getType().cast<MemRefType>().getElementType());
+ assert(oldMemRef.getType().cast<MemRefType>().getElementType() ==
+ newMemRef.getType().cast<MemRefType>().getElementType());
if (!isMemRefDereferencingOp(*op))
// Failure: memref used in a non-dereferencing context (potentially
@@ -152,7 +152,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
// Prepend 'extraIndices' in 'newMapOperands'.
for (auto extraIndex : extraIndices) {
- assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
+ assert(extraIndex.getDefiningOp()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
"invalid memory op index");
@@ -171,8 +171,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
canonicalizeMapAndOperands(&newMap, &newMapOperands);
// Remove any affine.apply's that became dead as a result of composition.
for (auto value : affineApplyOps)
- if (value->use_empty())
- value->getDefiningOp()->erase();
+ if (value.use_empty())
+ value.getDefiningOp()->erase();
// Construct the new operation using this memref.
OperationState state(op->getLoc(), op->getName());
@@ -195,7 +195,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
// Result types don't change. Both memref's are of the same elemental type.
state.types.reserve(op->getNumResults());
for (auto result : op->getResults())
- state.types.push_back(result->getType());
+ state.types.push_back(result.getType());
// Add attribute for 'newMap', other Attributes do not change.
auto newMapAttr = AffineMapAttr::get(newMap);
@@ -222,9 +222,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
ArrayRef<Value> symbolOperands,
Operation *domInstFilter,
Operation *postDomInstFilter) {
- unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
+ unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
- unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
+ unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank();
(void)oldMemRefRank;
if (indexRemap) {
assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
@@ -237,8 +237,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
}
// Assert same elemental type.
- assert(oldMemRef->getType().cast<MemRefType>().getElementType() ==
- newMemRef->getType().cast<MemRefType>().getElementType());
+ assert(oldMemRef.getType().cast<MemRefType>().getElementType() ==
+ newMemRef.getType().cast<MemRefType>().getElementType());
std::unique_ptr<DominanceInfo> domInfo;
std::unique_ptr<PostDominanceInfo> postDomInfo;
@@ -254,7 +254,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
// DenseSet since an operation could potentially have multiple uses of a
// memref (although rare), and the replacement later is going to erase ops.
DenseSet<Operation *> opsToReplace;
- for (auto *op : oldMemRef->getUsers()) {
+ for (auto *op : oldMemRef.getUsers()) {
// Skip this use if it's not dominated by domInstFilter.
if (domInstFilter && !domInfo->dominates(domInstFilter, op))
continue;
@@ -325,7 +325,7 @@ void mlir::createAffineComputationSlice(
SmallVector<Value, 4> subOperands;
subOperands.reserve(opInst->getNumOperands());
for (auto operand : opInst->getOperands())
- if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp()))
+ if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp()))
subOperands.push_back(operand);
// Gather sequence of AffineApplyOps reachable from 'subOperands'.
@@ -340,7 +340,7 @@ void mlir::createAffineComputationSlice(
bool localized = true;
for (auto *op : affineApplyOps) {
for (auto result : op->getResults()) {
- for (auto *user : result->getUsers()) {
+ for (auto *user : result.getUsers()) {
if (user != opInst) {
localized = false;
break;
@@ -461,9 +461,9 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
}
// Replace any uses of the original alloc op and erase it. All remaining uses
// have to be dealloc's; RAMUW above would've failed otherwise.
- assert(std::all_of(oldMemRef->user_begin(), oldMemRef->user_end(),
- [](Operation *op) { return isa<DeallocOp>(op); }));
- oldMemRef->replaceAllUsesWith(newAlloc);
+ assert(llvm::all_of(oldMemRef.getUsers(),
+ [](Operation *op) { return isa<DeallocOp>(op); }));
+ oldMemRef.replaceAllUsesWith(newAlloc);
allocOp.erase();
return success();
}
OpenPOWER on IntegriCloud