author     River Riddle <riverriddle@google.com>             2019-12-22 21:59:55 -0800
committer  A. Unique TensorFlower <gardener@tensorflow.org>  2019-12-22 22:00:23 -0800
commit     35807bc4c5c9d8abc31ba0b2f955a82abf276e12 (patch)
tree       d083d37d993a774239081509a50e3e6c65366421 /mlir/lib/Analysis/LoopAnalysis.cpp
parent     22954a0e408afde1d8686dffb3a3dcab107a2cd3 (diff)
NFC: Introduce new ValuePtr/ValueRef typedefs to simplify the transition to Value being value-typed.
This is an initial step toward refactoring the representation of OpResult as proposed in:
https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ

This change will make it much simpler to incrementally transition all of the existing code to use value-typed semantics.

PiperOrigin-RevId: 286844725
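The alias definitions themselves are not part of this file's diff, but the hunks below imply that ValuePtr is a drop-in alias for Value *. A minimal sketch of what such transitional typedefs look like (an assumption based on the commit title, not code shown on this page):

```cpp
// Sketch of the transitional aliases the commit title describes.
// While Value is still held by pointer, the aliases are pure renames:
class Value; // IR value; currently referenced through pointers

using ValuePtr = Value *; // assumption: drop-in alias for Value *
using ValueRef = Value &; // assumption: drop-in alias for Value &

// Once Value becomes value-typed, only the aliases need to change,
// e.g. to:
//   using ValuePtr = Value;
//   using ValueRef = Value;
// leaving call sites such as SmallVector<ValuePtr, 4> untouched.
```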
Diffstat (limited to 'mlir/lib/Analysis/LoopAnalysis.cpp')
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index a81116579ce..9dfbfe0c542 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -43,7 +43,7 @@ using namespace mlir;
// be more powerful (since both inequalities and equalities will be considered).
void mlir::buildTripCountMapAndOperands(
AffineForOp forOp, AffineMap *tripCountMap,
- SmallVectorImpl<Value *> *tripCountOperands) {
+ SmallVectorImpl<ValuePtr> *tripCountOperands) {
int64_t loopSpan;
int64_t step = forOp.getStep();
@@ -65,8 +65,8 @@ void mlir::buildTripCountMapAndOperands(
*tripCountMap = AffineMap();
return;
}
- SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
// Difference of each upper bound expression from the single lower bound
// expression (divided by the step) provides the expressions for the trip
@@ -98,7 +98,7 @@ void mlir::buildTripCountMapAndOperands(
// works with analysis structures (FlatAffineConstraints) and thus doesn't
// update the IR.
Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
AffineMap map;
buildTripCountMapAndOperands(forOp, &map, &operands);
@@ -124,7 +124,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
/// expression analysis is used (indirectly through getTripCount), and
/// this method is thus able to determine non-trivial divisors.
uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
AffineMap map;
buildTripCountMapAndOperands(forOp, &map, &operands);
@@ -173,7 +173,7 @@ uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
///
/// Returns false in cases with more than one AffineApplyOp; this is
/// conservative.
-static bool isAccessIndexInvariant(Value *iv, Value *index) {
+static bool isAccessIndexInvariant(ValuePtr iv, ValuePtr index) {
assert(isForInductionVar(iv) && "iv must be an AffineForOp");
assert(index->getType().isa<IndexType>() && "index must be of IndexType");
SmallVector<Operation *, 4> affineApplyOps;
@@ -197,11 +197,11 @@ static bool isAccessIndexInvariant(Value *iv, Value *index) {
return !(AffineValueMap(composeOp).isFunctionOf(0, iv));
}
-DenseSet<Value *> mlir::getInvariantAccesses(Value *iv,
- ArrayRef<Value *> indices) {
- DenseSet<Value *> res;
+DenseSet<ValuePtr> mlir::getInvariantAccesses(ValuePtr iv,
+ ArrayRef<ValuePtr> indices) {
+ DenseSet<ValuePtr> res;
for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) {
- auto *val = indices[idx];
+ auto val = indices[idx];
if (isAccessIndexInvariant(iv, val)) {
res.insert(val);
}
@@ -229,7 +229,7 @@ DenseSet<Value *> mlir::getInvariantAccesses(Value *iv,
///
// TODO(ntv): check strides.
template <typename LoadOrStoreOp>
-static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
+static bool isContiguousAccess(ValuePtr iv, LoadOrStoreOp memoryOp,
int *memRefDim) {
static_assert(std::is_same<LoadOrStoreOp, AffineLoadOp>::value ||
std::is_same<LoadOrStoreOp, AffineStoreOp>::value,
@@ -250,11 +250,11 @@ static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
int uniqueVaryingIndexAlongIv = -1;
auto accessMap = memoryOp.getAffineMap();
- SmallVector<Value *, 4> mapOperands(memoryOp.getMapOperands());
+ SmallVector<ValuePtr, 4> mapOperands(memoryOp.getMapOperands());
unsigned numDims = accessMap.getNumDims();
for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
// Gather map operands used in result expr 'i' into 'exprOperands'.
- SmallVector<Value *, 4> exprOperands;
+ SmallVector<ValuePtr, 4> exprOperands;
auto resultExpr = accessMap.getResult(i);
resultExpr.walk([&](AffineExpr expr) {
if (auto dimExpr = expr.dyn_cast<AffineDimExpr>())
@@ -263,7 +263,7 @@ static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
});
// Check access invariance of each operand in 'exprOperands'.
- for (auto *exprOperand : exprOperands) {
+ for (auto exprOperand : exprOperands) {
if (!isAccessIndexInvariant(iv, exprOperand)) {
if (uniqueVaryingIndexAlongIv != -1) {
// 2+ varying indices -> do not vectorize along iv.
@@ -382,7 +382,7 @@ bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
// Validate the results of this operation if it were to be shifted.
for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
- Value *result = op.getResult(i);
+ ValuePtr result = op.getResult(i);
for (auto *user : result->getUsers()) {
// If an ancestor operation doesn't lie in the block of forOp,
// there is no shift to check.
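Not part of the commit, but to illustrate why the alias makes the eventual flip cheap, here is a self-contained toy (all names except ValuePtr are hypothetical, and this is a sketch rather than real MLIR code) where the same call sites compile against both a pointer-based and a value-typed definition:

```cpp
#include <cassert>
#include <vector>

// Toy stand-in for the underlying IR value storage; not real MLIR code.
struct ValueImpl {
  int id;
};

#ifdef POINTER_ERA
// Pointer era: Value is the storage itself, and ValuePtr is a raw pointer.
using Value = ValueImpl;
using ValuePtr = Value *;
#else
// Value-typed era: Value is a thin copyable handle over the storage,
// and ValuePtr simply aliases it.
struct Value {
  ValueImpl *impl = nullptr;
  /*implicit*/ Value(ValueImpl *p = nullptr) : impl(p) {}
  ValueImpl *operator->() const { return impl; }
};
using ValuePtr = Value;
#endif

int main() {
  ValueImpl raw{42};
  ValuePtr v = &raw;                 // compiles in both eras
  std::vector<ValuePtr> operands{v}; // containers keyed on ValuePtr, as in the diff
  assert(v->id == 42);
  assert(operands.front()->id == 42);
  return 0;
}
```

The design point: because every call site is written against ValuePtr, switching Value from pointer-based to value-typed is confined to one definition instead of being spread across every use, which is exactly what makes the incremental transition described in the commit message tractable.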