summary refs log tree commit diff stats
path: root/mlir/lib/Analysis/LoopAnalysis.cpp
diff options
context:
space:
mode:
author    River Riddle <riverriddle@google.com>  2019-03-25 11:13:31 -0700
committer jpienaar <jpienaar@google.com>  2019-03-29 17:39:19 -0700
commit    af1abcc80b6eb6de2049b6cc79bbeac92f134e58 (patch)
tree      7cc496d2b446ac2db877f3574c00f4f61d1d1886 /mlir/lib/Analysis/LoopAnalysis.cpp
parent    832567b3799f763ec3ba9480e1628c5a3de7fa6e (diff)
download  bcm5719-llvm-af1abcc80b6eb6de2049b6cc79bbeac92f134e58.tar.gz
bcm5719-llvm-af1abcc80b6eb6de2049b6cc79bbeac92f134e58.zip
Replace usages of "operator->" with "." for the AffineOps.
Note: The "operator->" method is a temporary helper for the de-const transition and is gradually being phased out. PiperOrigin-RevId: 240179439
Diffstat (limited to 'mlir/lib/Analysis/LoopAnalysis.cpp')
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp | 34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index ba9a29177fe..bf8e265dbb8 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -53,12 +53,12 @@ void mlir::buildTripCountMapAndOperands(
SmallVectorImpl<Value *> *tripCountOperands) {
int64_t loopSpan;
- int64_t step = forOp->getStep();
- FuncBuilder b(forOp->getInstruction());
+ int64_t step = forOp.getStep();
+ FuncBuilder b(forOp.getInstruction());
- if (forOp->hasConstantBounds()) {
- int64_t lb = forOp->getConstantLowerBound();
- int64_t ub = forOp->getConstantUpperBound();
+ if (forOp.hasConstantBounds()) {
+ int64_t lb = forOp.getConstantLowerBound();
+ int64_t ub = forOp.getConstantUpperBound();
loopSpan = ub - lb;
if (loopSpan < 0)
loopSpan = 0;
@@ -66,20 +66,20 @@ void mlir::buildTripCountMapAndOperands(
tripCountOperands->clear();
return;
}
- auto lbMap = forOp->getLowerBoundMap();
- auto ubMap = forOp->getUpperBoundMap();
+ auto lbMap = forOp.getLowerBoundMap();
+ auto ubMap = forOp.getUpperBoundMap();
if (lbMap.getNumResults() != 1) {
*map = AffineMap();
return;
}
- SmallVector<Value *, 4> lbOperands(forOp->getLowerBoundOperands());
- SmallVector<Value *, 4> ubOperands(forOp->getUpperBoundOperands());
- auto lb = b.create<AffineApplyOp>(forOp->getLoc(), lbMap, lbOperands);
+ SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
+ auto lb = b.create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
SmallVector<Value *, 4> ubs;
ubs.reserve(ubMap.getNumResults());
for (auto ubExpr : ubMap.getResults())
ubs.push_back(b.create<AffineApplyOp>(
- forOp->getLoc(),
+ forOp.getLoc(),
b.getAffineMap(ubMap.getNumDims(), ubMap.getNumSymbols(), {ubExpr}, {}),
ubOperands));
@@ -102,8 +102,8 @@ void mlir::buildTripCountMapAndOperands(
for (auto *v : ubs)
if (v->use_empty())
v->getDefiningInst()->erase();
- if (lb->use_empty())
- lb->erase();
+ if (lb.use_empty())
+ lb.erase();
}
/// Returns the trip count of the loop if it's a constant, None otherwise. This
@@ -280,7 +280,7 @@ using VectorizableInstFun = std::function<bool(AffineForOp, Instruction &)>;
static bool isVectorizableLoopWithCond(AffineForOp loop,
VectorizableInstFun isVectorizableInst) {
- auto *forInst = loop->getInstruction();
+ auto *forInst = loop.getInstruction();
if (!matcher::isParallelLoop(*forInst) &&
!matcher::isReductionLoop(*forInst)) {
return false;
@@ -339,9 +339,9 @@ bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim(
[fastestVaryingDim](AffineForOp loop, Instruction &op) {
auto load = op.dyn_cast<LoadOp>();
auto store = op.dyn_cast<StoreOp>();
- return load ? isContiguousAccess(*loop->getInductionVar(), load,
+ return load ? isContiguousAccess(*loop.getInductionVar(), load,
fastestVaryingDim)
- : isContiguousAccess(*loop->getInductionVar(), store,
+ : isContiguousAccess(*loop.getInductionVar(), store,
fastestVaryingDim);
});
return isVectorizableLoopWithCond(loop, fun);
@@ -360,7 +360,7 @@ bool mlir::isVectorizableLoop(AffineForOp loop) {
// TODO(mlir-team): extend this to check for memory-based dependence
// violation when we have the support.
bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
- auto *forBody = forOp->getBody();
+ auto *forBody = forOp.getBody();
assert(shifts.size() == forBody->getInstructions().size());
// Work backwards over the body of the block so that the shift of a use's
OpenPOWER on IntegriCloud