summary | refs | log | tree | commit | diff | stats
path: root/mlir/lib/Analysis
diff options
context:
space:
mode:
author    River Riddle <riverriddle@google.com>    2019-12-23 14:45:01 -0800
committer A. Unique TensorFlower <gardener@tensorflow.org>    2019-12-23 16:36:53 -0800
commite62a69561fb9d7b1013d2853da68d79a7907fead (patch)
tree0dd059094cbfb8d904513abcdc1fbe8cfa89bb09 /mlir/lib/Analysis
parent5d5bd2e1da29d976cb125dbb3cd097a5e42b2be4 (diff)
downloadbcm5719-llvm-e62a69561fb9d7b1013d2853da68d79a7907fead.tar.gz
bcm5719-llvm-e62a69561fb9d7b1013d2853da68d79a7907fead.zip
NFC: Replace ValuePtr with Value and remove it now that Value is value-typed.
ValuePtr was a temporary typedef during the transition to a value-typed Value. PiperOrigin-RevId: 286945714
Diffstat (limited to 'mlir/lib/Analysis')
-rw-r--r--  mlir/lib/Analysis/AffineAnalysis.cpp    | 48
-rw-r--r--  mlir/lib/Analysis/AffineStructures.cpp  | 85
-rw-r--r--  mlir/lib/Analysis/CallGraph.cpp         |  2
-rw-r--r--  mlir/lib/Analysis/Dominance.cpp         |  2
-rw-r--r--  mlir/lib/Analysis/Liveness.cpp          | 34
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp      | 25
-rw-r--r--  mlir/lib/Analysis/Utils.cpp             | 32
-rw-r--r--  mlir/lib/Analysis/VectorAnalysis.cpp    |  4
8 files changed, 116 insertions, 116 deletions
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index 27aa0748711..3358bb437ff 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -39,10 +39,10 @@ using llvm::dbgs;
// TODO(andydavis) Add a method to AffineApplyOp which forward substitutes
// the AffineApplyOp into any user AffineApplyOps.
void mlir::getReachableAffineApplyOps(
- ArrayRef<ValuePtr> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
+ ArrayRef<Value> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
struct State {
// The ssa value for this node in the DFS traversal.
- ValuePtr value;
+ Value value;
// The operand index of 'value' to explore next during DFS traversal.
unsigned operandIndex;
};
@@ -90,7 +90,7 @@ void mlir::getReachableAffineApplyOps(
// setExprStride(ArrayRef<int64_t> expr, int64_t stride)
LogicalResult mlir::getIndexSet(MutableArrayRef<AffineForOp> forOps,
FlatAffineConstraints *domain) {
- SmallVector<ValuePtr, 4> indices;
+ SmallVector<Value, 4> indices;
extractForInductionVars(forOps, &indices);
// Reset while associated Values in 'indices' to the domain.
domain->reset(forOps.size(), /*numSymbols=*/0, /*numLocals=*/0, indices);
@@ -137,25 +137,25 @@ static LogicalResult getInstIndexSet(Operation *op,
// of maps to check. So getSrcDimOrSymPos would be "getPos(value, {0, 2})".
class ValuePositionMap {
public:
- void addSrcValue(ValuePtr value) {
+ void addSrcValue(Value value) {
if (addValueAt(value, &srcDimPosMap, numSrcDims))
++numSrcDims;
}
- void addDstValue(ValuePtr value) {
+ void addDstValue(Value value) {
if (addValueAt(value, &dstDimPosMap, numDstDims))
++numDstDims;
}
- void addSymbolValue(ValuePtr value) {
+ void addSymbolValue(Value value) {
if (addValueAt(value, &symbolPosMap, numSymbols))
++numSymbols;
}
- unsigned getSrcDimOrSymPos(ValuePtr value) const {
+ unsigned getSrcDimOrSymPos(Value value) const {
return getDimOrSymPos(value, srcDimPosMap, 0);
}
- unsigned getDstDimOrSymPos(ValuePtr value) const {
+ unsigned getDstDimOrSymPos(Value value) const {
return getDimOrSymPos(value, dstDimPosMap, numSrcDims);
}
- unsigned getSymPos(ValuePtr value) const {
+ unsigned getSymPos(Value value) const {
auto it = symbolPosMap.find(value);
assert(it != symbolPosMap.end());
return numSrcDims + numDstDims + it->second;
@@ -167,7 +167,7 @@ public:
unsigned getNumSymbols() const { return numSymbols; }
private:
- bool addValueAt(ValuePtr value, DenseMap<ValuePtr, unsigned> *posMap,
+ bool addValueAt(Value value, DenseMap<Value, unsigned> *posMap,
unsigned position) {
auto it = posMap->find(value);
if (it == posMap->end()) {
@@ -176,8 +176,8 @@ private:
}
return false;
}
- unsigned getDimOrSymPos(ValuePtr value,
- const DenseMap<ValuePtr, unsigned> &dimPosMap,
+ unsigned getDimOrSymPos(Value value,
+ const DenseMap<Value, unsigned> &dimPosMap,
unsigned dimPosOffset) const {
auto it = dimPosMap.find(value);
if (it != dimPosMap.end()) {
@@ -191,9 +191,9 @@ private:
unsigned numSrcDims = 0;
unsigned numDstDims = 0;
unsigned numSymbols = 0;
- DenseMap<ValuePtr, unsigned> srcDimPosMap;
- DenseMap<ValuePtr, unsigned> dstDimPosMap;
- DenseMap<ValuePtr, unsigned> symbolPosMap;
+ DenseMap<Value, unsigned> srcDimPosMap;
+ DenseMap<Value, unsigned> dstDimPosMap;
+ DenseMap<Value, unsigned> symbolPosMap;
};
// Builds a map from Value to identifier position in a new merged identifier
@@ -210,7 +210,7 @@ static void buildDimAndSymbolPositionMaps(
const FlatAffineConstraints &dstDomain, const AffineValueMap &srcAccessMap,
const AffineValueMap &dstAccessMap, ValuePositionMap *valuePosMap,
FlatAffineConstraints *dependenceConstraints) {
- auto updateValuePosMap = [&](ArrayRef<ValuePtr> values, bool isSrc) {
+ auto updateValuePosMap = [&](ArrayRef<Value> values, bool isSrc) {
for (unsigned i = 0, e = values.size(); i < e; ++i) {
auto value = values[i];
if (!isForInductionVar(values[i])) {
@@ -225,7 +225,7 @@ static void buildDimAndSymbolPositionMaps(
}
};
- SmallVector<ValuePtr, 4> srcValues, destValues;
+ SmallVector<Value, 4> srcValues, destValues;
srcDomain.getIdValues(0, srcDomain.getNumDimAndSymbolIds(), &srcValues);
dstDomain.getIdValues(0, dstDomain.getNumDimAndSymbolIds(), &destValues);
// Update value position map with identifiers from src iteration domain.
@@ -264,7 +264,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain,
numLocals);
// Set values corresponding to dependence constraint identifiers.
- SmallVector<ValuePtr, 4> srcLoopIVs, dstLoopIVs;
+ SmallVector<Value, 4> srcLoopIVs, dstLoopIVs;
srcDomain.getIdValues(0, srcDomain.getNumDimIds(), &srcLoopIVs);
dstDomain.getIdValues(0, dstDomain.getNumDimIds(), &dstLoopIVs);
@@ -273,7 +273,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain,
srcLoopIVs.size(), srcLoopIVs.size() + dstLoopIVs.size(), dstLoopIVs);
// Set values for the symbolic identifier dimensions.
- auto setSymbolIds = [&](ArrayRef<ValuePtr> values) {
+ auto setSymbolIds = [&](ArrayRef<Value> values) {
for (auto value : values) {
if (!isForInductionVar(value)) {
assert(isValidSymbol(value) && "expected symbol");
@@ -285,7 +285,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain,
setSymbolIds(srcAccessMap.getOperands());
setSymbolIds(dstAccessMap.getOperands());
- SmallVector<ValuePtr, 8> srcSymbolValues, dstSymbolValues;
+ SmallVector<Value, 8> srcSymbolValues, dstSymbolValues;
srcDomain.getIdValues(srcDomain.getNumDimIds(),
srcDomain.getNumDimAndSymbolIds(), &srcSymbolValues);
dstDomain.getIdValues(dstDomain.getNumDimIds(),
@@ -389,10 +389,10 @@ addMemRefAccessConstraints(const AffineValueMap &srcAccessMap,
unsigned numResults = srcMap.getNumResults();
unsigned srcNumIds = srcMap.getNumDims() + srcMap.getNumSymbols();
- ArrayRef<ValuePtr> srcOperands = srcAccessMap.getOperands();
+ ArrayRef<Value> srcOperands = srcAccessMap.getOperands();
unsigned dstNumIds = dstMap.getNumDims() + dstMap.getNumSymbols();
- ArrayRef<ValuePtr> dstOperands = dstAccessMap.getOperands();
+ ArrayRef<Value> dstOperands = dstAccessMap.getOperands();
std::vector<SmallVector<int64_t, 8>> srcFlatExprs;
std::vector<SmallVector<int64_t, 8>> destFlatExprs;
@@ -448,7 +448,7 @@ addMemRefAccessConstraints(const AffineValueMap &srcAccessMap,
}
// Add equality constraints for any operands that are defined by constant ops.
- auto addEqForConstOperands = [&](ArrayRef<ValuePtr> operands) {
+ auto addEqForConstOperands = [&](ArrayRef<Value> operands) {
for (unsigned i = 0, e = operands.size(); i < e; ++i) {
if (isForInductionVar(operands[i]))
continue;
@@ -666,7 +666,7 @@ void MemRefAccess::getAccessMap(AffineValueMap *accessMap) const {
map = loadOp.getAffineMap();
else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst))
map = storeOp.getAffineMap();
- SmallVector<ValuePtr, 8> operands(indices.begin(), indices.end());
+ SmallVector<Value, 8> operands(indices.begin(), indices.end());
fullyComposeAffineMapAndOperands(&map, &operands);
map = simplifyAffineMap(map);
canonicalizeMapAndOperands(&map, &operands);
diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index ce96a19751f..78a869884ee 100644
--- a/mlir/lib/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -195,8 +195,8 @@ MutableIntegerSet::MutableIntegerSet(unsigned numDims, unsigned numSymbols,
// AffineValueMap.
//===----------------------------------------------------------------------===//
-AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<ValuePtr> operands,
- ArrayRef<ValuePtr> results)
+AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<Value> operands,
+ ArrayRef<Value> results)
: map(map), operands(operands.begin(), operands.end()),
results(results.begin(), results.end()) {}
@@ -210,8 +210,8 @@ AffineValueMap::AffineValueMap(AffineBound bound)
: map(bound.getMap()),
operands(bound.operand_begin(), bound.operand_end()) {}
-void AffineValueMap::reset(AffineMap map, ArrayRef<ValuePtr> operands,
- ArrayRef<ValuePtr> results) {
+void AffineValueMap::reset(AffineMap map, ArrayRef<Value> operands,
+ ArrayRef<Value> results) {
this->map.reset(map);
this->operands.assign(operands.begin(), operands.end());
this->results.assign(results.begin(), results.end());
@@ -223,14 +223,14 @@ void AffineValueMap::difference(const AffineValueMap &a,
// Fully compose A's map + operands.
auto aMap = a.getAffineMap();
- SmallVector<ValuePtr, 4> aOperands(a.getOperands().begin(),
- a.getOperands().end());
+ SmallVector<Value, 4> aOperands(a.getOperands().begin(),
+ a.getOperands().end());
fullyComposeAffineMapAndOperands(&aMap, &aOperands);
// Use the affine apply normalizer to get B's map into A's coordinate space.
AffineApplyNormalizer normalizer(aMap, aOperands);
- SmallVector<ValuePtr, 4> bOperands(b.getOperands().begin(),
- b.getOperands().end());
+ SmallVector<Value, 4> bOperands(b.getOperands().begin(),
+ b.getOperands().end());
auto bMap = b.getAffineMap();
normalizer.normalize(&bMap, &bOperands);
@@ -254,7 +254,7 @@ void AffineValueMap::difference(const AffineValueMap &a,
// Returns true and sets 'indexOfMatch' if 'valueToMatch' is found in
// 'valuesToSearch' beginning at 'indexStart'. Returns false otherwise.
-static bool findIndex(ValuePtr valueToMatch, ArrayRef<ValuePtr> valuesToSearch,
+static bool findIndex(Value valueToMatch, ArrayRef<Value> valuesToSearch,
unsigned indexStart, unsigned *indexOfMatch) {
unsigned size = valuesToSearch.size();
for (unsigned i = indexStart; i < size; ++i) {
@@ -272,7 +272,7 @@ inline bool AffineValueMap::isMultipleOf(unsigned idx, int64_t factor) const {
/// This method uses the invariant that operands are always positionally aligned
/// with the AffineDimExpr in the underlying AffineMap.
-bool AffineValueMap::isFunctionOf(unsigned idx, ValuePtr value) const {
+bool AffineValueMap::isFunctionOf(unsigned idx, Value value) const {
unsigned index;
if (!findIndex(value, operands, /*indexStart=*/0, &index)) {
return false;
@@ -283,12 +283,12 @@ bool AffineValueMap::isFunctionOf(unsigned idx, ValuePtr value) const {
return expr.isFunctionOfDim(index);
}
-ValuePtr AffineValueMap::getOperand(unsigned i) const {
- return static_cast<ValuePtr>(operands[i]);
+Value AffineValueMap::getOperand(unsigned i) const {
+ return static_cast<Value>(operands[i]);
}
-ArrayRef<ValuePtr> AffineValueMap::getOperands() const {
- return ArrayRef<ValuePtr>(operands);
+ArrayRef<Value> AffineValueMap::getOperands() const {
+ return ArrayRef<Value>(operands);
}
AffineMap AffineValueMap::getAffineMap() const { return map.getAffineMap(); }
@@ -369,7 +369,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities,
unsigned newNumReservedCols,
unsigned newNumDims, unsigned newNumSymbols,
unsigned newNumLocals,
- ArrayRef<ValuePtr> idArgs) {
+ ArrayRef<Value> idArgs) {
assert(newNumReservedCols >= newNumDims + newNumSymbols + newNumLocals + 1 &&
"minimum 1 column");
numReservedCols = newNumReservedCols;
@@ -392,7 +392,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities,
void FlatAffineConstraints::reset(unsigned newNumDims, unsigned newNumSymbols,
unsigned newNumLocals,
- ArrayRef<ValuePtr> idArgs) {
+ ArrayRef<Value> idArgs) {
reset(0, 0, newNumDims + newNumSymbols + newNumLocals + 1, newNumDims,
newNumSymbols, newNumLocals, idArgs);
}
@@ -419,17 +419,17 @@ void FlatAffineConstraints::addLocalId(unsigned pos) {
addId(IdKind::Local, pos);
}
-void FlatAffineConstraints::addDimId(unsigned pos, ValuePtr id) {
+void FlatAffineConstraints::addDimId(unsigned pos, Value id) {
addId(IdKind::Dimension, pos, id);
}
-void FlatAffineConstraints::addSymbolId(unsigned pos, ValuePtr id) {
+void FlatAffineConstraints::addSymbolId(unsigned pos, Value id) {
addId(IdKind::Symbol, pos, id);
}
/// Adds a dimensional identifier. The added column is initialized to
/// zero.
-void FlatAffineConstraints::addId(IdKind kind, unsigned pos, ValuePtr id) {
+void FlatAffineConstraints::addId(IdKind kind, unsigned pos, Value id) {
if (kind == IdKind::Dimension) {
assert(pos <= getNumDimIds());
} else if (kind == IdKind::Symbol) {
@@ -518,7 +518,7 @@ bool FlatAffineConstraints::areIdsAlignedWithOther(
/// Checks if the SSA values associated with `cst''s identifiers are unique.
static bool LLVM_ATTRIBUTE_UNUSED
areIdsUnique(const FlatAffineConstraints &cst) {
- SmallPtrSet<ValuePtr, 8> uniqueIds;
+ SmallPtrSet<Value, 8> uniqueIds;
for (auto id : cst.getIds()) {
if (id.hasValue() && !uniqueIds.insert(id.getValue()).second)
return false;
@@ -562,11 +562,11 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A,
assert(std::all_of(A->getIds().begin() + offset,
A->getIds().begin() + A->getNumDimAndSymbolIds(),
- [](Optional<ValuePtr> id) { return id.hasValue(); }));
+ [](Optional<Value> id) { return id.hasValue(); }));
assert(std::all_of(B->getIds().begin() + offset,
B->getIds().begin() + B->getNumDimAndSymbolIds(),
- [](Optional<ValuePtr> id) { return id.hasValue(); }));
+ [](Optional<Value> id) { return id.hasValue(); }));
// Place local id's of A after local id's of B.
for (unsigned l = 0, e = A->getNumLocalIds(); l < e; l++) {
@@ -577,7 +577,7 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A,
A->addLocalId(A->getNumLocalIds());
}
- SmallVector<ValuePtr, 4> aDimValues, aSymValues;
+ SmallVector<Value, 4> aDimValues, aSymValues;
A->getIdValues(offset, A->getNumDimIds(), &aDimValues);
A->getIdValues(A->getNumDimIds(), A->getNumDimAndSymbolIds(), &aSymValues);
{
@@ -776,7 +776,7 @@ LogicalResult FlatAffineConstraints::composeMatchingMap(AffineMap other) {
}
// Turn a dimension into a symbol.
-static void turnDimIntoSymbol(FlatAffineConstraints *cst, ValueRef id) {
+static void turnDimIntoSymbol(FlatAffineConstraints *cst, Value id) {
unsigned pos;
if (cst->findId(id, &pos) && pos < cst->getNumDimIds()) {
swapId(cst, pos, cst->getNumDimIds() - 1);
@@ -785,7 +785,7 @@ static void turnDimIntoSymbol(FlatAffineConstraints *cst, ValueRef id) {
}
// Turn a symbol into a dimension.
-static void turnSymbolIntoDim(FlatAffineConstraints *cst, ValueRef id) {
+static void turnSymbolIntoDim(FlatAffineConstraints *cst, Value id) {
unsigned pos;
if (cst->findId(id, &pos) && pos >= cst->getNumDimIds() &&
pos < cst->getNumDimAndSymbolIds()) {
@@ -797,7 +797,7 @@ static void turnSymbolIntoDim(FlatAffineConstraints *cst, ValueRef id) {
// Changes all symbol identifiers which are loop IVs to dim identifiers.
void FlatAffineConstraints::convertLoopIVSymbolsToDims() {
// Gather all symbols which are loop IVs.
- SmallVector<ValuePtr, 4> loopIVs;
+ SmallVector<Value, 4> loopIVs;
for (unsigned i = getNumDimIds(), e = getNumDimAndSymbolIds(); i < e; i++) {
if (ids[i].hasValue() && getForInductionVarOwner(ids[i].getValue()))
loopIVs.push_back(ids[i].getValue());
@@ -808,7 +808,7 @@ void FlatAffineConstraints::convertLoopIVSymbolsToDims() {
}
}
-void FlatAffineConstraints::addInductionVarOrTerminalSymbol(ValuePtr id) {
+void FlatAffineConstraints::addInductionVarOrTerminalSymbol(Value id) {
if (containsId(*id))
return;
@@ -867,8 +867,8 @@ LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) {
addConstantLowerBound(pos, forOp.getConstantLowerBound());
} else {
// Non-constant lower bound case.
- SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands().begin(),
- forOp.getLowerBoundOperands().end());
+ SmallVector<Value, 4> lbOperands(forOp.getLowerBoundOperands().begin(),
+ forOp.getLowerBoundOperands().end());
if (failed(addLowerOrUpperBound(pos, forOp.getLowerBoundMap(), lbOperands,
/*eq=*/false, /*lower=*/true)))
return failure();
@@ -879,8 +879,8 @@ LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) {
return success();
}
// Non-constant upper bound case.
- SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands().begin(),
- forOp.getUpperBoundOperands().end());
+ SmallVector<Value, 4> ubOperands(forOp.getUpperBoundOperands().begin(),
+ forOp.getUpperBoundOperands().end());
return addLowerOrUpperBound(pos, forOp.getUpperBoundMap(), ubOperands,
/*eq=*/false, /*lower=*/false);
}
@@ -1748,7 +1748,7 @@ void FlatAffineConstraints::getSliceBounds(unsigned offset, unsigned num,
LogicalResult
FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
- ArrayRef<ValuePtr> boundOperands,
+ ArrayRef<Value> boundOperands,
bool eq, bool lower) {
assert(pos < getNumDimAndSymbolIds() && "invalid position");
// Equality follows the logic of lower bound except that we add an equality
@@ -1760,7 +1760,7 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
// Fully compose map and operands; canonicalize and simplify so that we
// transitively get to terminal symbols or loop IVs.
auto map = boundMap;
- SmallVector<ValuePtr, 4> operands(boundOperands.begin(), boundOperands.end());
+ SmallVector<Value, 4> operands(boundOperands.begin(), boundOperands.end());
fullyComposeAffineMapAndOperands(&map, &operands);
map = simplifyAffineMap(map);
canonicalizeMapAndOperands(&map, &operands);
@@ -1838,9 +1838,10 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
// Note that both lower/upper bounds use operands from 'operands'.
// Returns failure for unimplemented cases such as semi-affine expressions or
// expressions with mod/floordiv.
-LogicalResult FlatAffineConstraints::addSliceBounds(
- ArrayRef<ValuePtr> values, ArrayRef<AffineMap> lbMaps,
- ArrayRef<AffineMap> ubMaps, ArrayRef<ValuePtr> operands) {
+LogicalResult FlatAffineConstraints::addSliceBounds(ArrayRef<Value> values,
+ ArrayRef<AffineMap> lbMaps,
+ ArrayRef<AffineMap> ubMaps,
+ ArrayRef<Value> operands) {
assert(values.size() == lbMaps.size());
assert(lbMaps.size() == ubMaps.size());
@@ -1962,7 +1963,7 @@ void FlatAffineConstraints::addLocalFloorDiv(ArrayRef<int64_t> dividend,
addInequality(bound);
}
-bool FlatAffineConstraints::findId(ValueRef id, unsigned *pos) const {
+bool FlatAffineConstraints::findId(Value id, unsigned *pos) const {
unsigned i = 0;
for (const auto &mayBeId : ids) {
if (mayBeId.hasValue() && mayBeId.getValue() == id) {
@@ -1974,8 +1975,8 @@ bool FlatAffineConstraints::findId(ValueRef id, unsigned *pos) const {
return false;
}
-bool FlatAffineConstraints::containsId(ValueRef id) const {
- return llvm::any_of(ids, [&](const Optional<ValuePtr> &mayBeId) {
+bool FlatAffineConstraints::containsId(Value id) const {
+ return llvm::any_of(ids, [&](const Optional<Value> &mayBeId) {
return mayBeId.hasValue() && mayBeId.getValue() == id;
});
}
@@ -1999,7 +2000,7 @@ void FlatAffineConstraints::setIdToConstant(unsigned pos, int64_t val) {
/// Sets the specified identifier to a constant value; asserts if the id is not
/// found.
-void FlatAffineConstraints::setIdToConstant(ValueRef id, int64_t val) {
+void FlatAffineConstraints::setIdToConstant(Value id, int64_t val) {
unsigned pos;
if (!findId(id, &pos))
// This is a pre-condition for this method.
@@ -2564,7 +2565,7 @@ void FlatAffineConstraints::FourierMotzkinEliminate(
unsigned newNumDims = dimsSymbols.first;
unsigned newNumSymbols = dimsSymbols.second;
- SmallVector<Optional<ValuePtr>, 8> newIds;
+ SmallVector<Optional<Value>, 8> newIds;
newIds.reserve(numIds - 1);
newIds.append(ids.begin(), ids.begin() + pos);
newIds.append(ids.begin() + pos + 1, ids.end());
@@ -2700,7 +2701,7 @@ void FlatAffineConstraints::projectOut(unsigned pos, unsigned num) {
normalizeConstraintsByGCD();
}
-void FlatAffineConstraints::projectOut(ValuePtr id) {
+void FlatAffineConstraints::projectOut(Value id) {
unsigned pos;
bool ret = findId(*id, &pos);
assert(ret);
diff --git a/mlir/lib/Analysis/CallGraph.cpp b/mlir/lib/Analysis/CallGraph.cpp
index 65f6e83bcdf..c35421d55eb 100644
--- a/mlir/lib/Analysis/CallGraph.cpp
+++ b/mlir/lib/Analysis/CallGraph.cpp
@@ -179,7 +179,7 @@ CallGraphNode *CallGraph::resolveCallable(CallInterfaceCallable callable,
callee = SymbolTable::lookupNearestSymbolFrom(from,
symbolRef.getRootReference());
else
- callee = callable.get<ValuePtr>()->getDefiningOp();
+ callee = callable.get<Value>()->getDefiningOp();
// If the callee is non-null and is a valid callable object, try to get the
// called region from it.
diff --git a/mlir/lib/Analysis/Dominance.cpp b/mlir/lib/Analysis/Dominance.cpp
index ea1501e8998..e4af4c0d69b 100644
--- a/mlir/lib/Analysis/Dominance.cpp
+++ b/mlir/lib/Analysis/Dominance.cpp
@@ -118,7 +118,7 @@ bool DominanceInfo::properlyDominates(Operation *a, Operation *b) {
}
/// Return true if value A properly dominates operation B.
-bool DominanceInfo::properlyDominates(ValuePtr a, Operation *b) {
+bool DominanceInfo::properlyDominates(Value a, Operation *b) {
if (auto *aOp = a->getDefiningOp()) {
// The values defined by an operation do *not* dominate any nested
// operations.
diff --git a/mlir/lib/Analysis/Liveness.cpp b/mlir/lib/Analysis/Liveness.cpp
index 9b7b806c558..7ba31365f1a 100644
--- a/mlir/lib/Analysis/Liveness.cpp
+++ b/mlir/lib/Analysis/Liveness.cpp
@@ -31,13 +31,13 @@ struct BlockInfoBuilder {
/// Fills the block builder with initial liveness information.
BlockInfoBuilder(Block *block) : block(block) {
// Mark all block arguments (phis) as defined.
- for (BlockArgumentPtr argument : block->getArguments())
+ for (BlockArgument argument : block->getArguments())
defValues.insert(argument);
// Check all result values and whether their uses
// are inside this block or not (see outValues).
for (Operation &operation : *block)
- for (ValuePtr result : operation.getResults()) {
+ for (Value result : operation.getResults()) {
defValues.insert(result);
// Check whether this value will be in the outValues
@@ -54,7 +54,7 @@ struct BlockInfoBuilder {
// Check all operations for used operands.
for (Operation &operation : block->getOperations())
- for (ValuePtr operand : operation.getOperands()) {
+ for (Value operand : operation.getOperands()) {
// If the operand is already defined in the scope of this
// block, we can skip the value in the use set.
if (!defValues.count(operand))
@@ -164,7 +164,7 @@ void Liveness::build(MutableArrayRef<Region> regions) {
}
/// Gets liveness info (if any) for the given value.
-Liveness::OperationListT Liveness::resolveLiveness(ValuePtr value) const {
+Liveness::OperationListT Liveness::resolveLiveness(Value value) const {
OperationListT result;
SmallPtrSet<Block *, 32> visited;
SmallVector<Block *, 8> toProcess;
@@ -229,7 +229,7 @@ const Liveness::ValueSetT &Liveness::getLiveOut(Block *block) const {
/// Returns true if the given operation represent the last use of the
/// given value.
-bool Liveness::isLastUse(ValuePtr value, Operation *operation) const {
+bool Liveness::isLastUse(Value value, Operation *operation) const {
Block *block = operation->getBlock();
const LivenessBlockInfo *blockInfo = getLiveness(block);
@@ -254,21 +254,21 @@ void Liveness::print(raw_ostream &os) const {
// Builds unique block/value mappings for testing purposes.
DenseMap<Block *, size_t> blockIds;
DenseMap<Operation *, size_t> operationIds;
- DenseMap<ValuePtr, size_t> valueIds;
+ DenseMap<Value, size_t> valueIds;
for (Region &region : operation->getRegions())
for (Block &block : region) {
blockIds.insert({&block, blockIds.size()});
- for (BlockArgumentPtr argument : block.getArguments())
+ for (BlockArgument argument : block.getArguments())
valueIds.insert({argument, valueIds.size()});
for (Operation &operation : block) {
operationIds.insert({&operation, operationIds.size()});
- for (ValuePtr result : operation.getResults())
+ for (Value result : operation.getResults())
valueIds.insert({result, valueIds.size()});
}
}
// Local printing helpers
- auto printValueRef = [&](ValuePtr value) {
+ auto printValueRef = [&](Value value) {
if (Operation *defOp = value->getDefiningOp())
os << "val_" << defOp->getName();
else {
@@ -280,12 +280,12 @@ void Liveness::print(raw_ostream &os) const {
};
auto printValueRefs = [&](const ValueSetT &values) {
- std::vector<ValuePtr> orderedValues(values.begin(), values.end());
+ std::vector<Value> orderedValues(values.begin(), values.end());
std::sort(orderedValues.begin(), orderedValues.end(),
- [&](ValuePtr left, ValuePtr right) {
+ [&](Value left, Value right) {
return valueIds[left] < valueIds[right];
});
- for (ValuePtr value : orderedValues)
+ for (Value value : orderedValues)
printValueRef(value);
};
@@ -306,7 +306,7 @@ void Liveness::print(raw_ostream &os) const {
if (op.getNumResults() < 1)
continue;
os << "\n";
- for (ValuePtr result : op.getResults()) {
+ for (Value result : op.getResults()) {
os << "// ";
printValueRef(result);
os << ":";
@@ -331,18 +331,18 @@ void Liveness::print(raw_ostream &os) const {
//===----------------------------------------------------------------------===//
/// Returns true if the given value is in the live-in set.
-bool LivenessBlockInfo::isLiveIn(ValuePtr value) const {
+bool LivenessBlockInfo::isLiveIn(Value value) const {
return inValues.count(value);
}
/// Returns true if the given value is in the live-out set.
-bool LivenessBlockInfo::isLiveOut(ValuePtr value) const {
+bool LivenessBlockInfo::isLiveOut(Value value) const {
return outValues.count(value);
}
/// Gets the start operation for the given value
/// (must be referenced in this block).
-Operation *LivenessBlockInfo::getStartOperation(ValuePtr value) const {
+Operation *LivenessBlockInfo::getStartOperation(Value value) const {
Operation *definingOp = value->getDefiningOp();
// The given value is either live-in or is defined
// in the scope of this block.
@@ -353,7 +353,7 @@ Operation *LivenessBlockInfo::getStartOperation(ValuePtr value) const {
/// Gets the end operation for the given value using the start operation
/// provided (must be referenced in this block).
-Operation *LivenessBlockInfo::getEndOperation(ValuePtr value,
+Operation *LivenessBlockInfo::getEndOperation(Value value,
Operation *startOperation) const {
// The given value is either dying in this block or live-out.
if (isLiveOut(value))
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 5499f887c1e..18c86dc63b4 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -34,7 +34,7 @@ using namespace mlir;
// be more powerful (since both inequalities and equalities will be considered).
void mlir::buildTripCountMapAndOperands(
AffineForOp forOp, AffineMap *tripCountMap,
- SmallVectorImpl<ValuePtr> *tripCountOperands) {
+ SmallVectorImpl<Value> *tripCountOperands) {
int64_t loopSpan;
int64_t step = forOp.getStep();
@@ -56,8 +56,8 @@ void mlir::buildTripCountMapAndOperands(
*tripCountMap = AffineMap();
return;
}
- SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
- SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<Value, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<Value, 4> ubOperands(forOp.getUpperBoundOperands());
// Difference of each upper bound expression from the single lower bound
// expression (divided by the step) provides the expressions for the trip
@@ -89,7 +89,7 @@ void mlir::buildTripCountMapAndOperands(
// works with analysis structures (FlatAffineConstraints) and thus doesn't
// update the IR.
Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
- SmallVector<ValuePtr, 4> operands;
+ SmallVector<Value, 4> operands;
AffineMap map;
buildTripCountMapAndOperands(forOp, &map, &operands);
@@ -115,7 +115,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
/// expression analysis is used (indirectly through getTripCount), and
/// this method is thus able to determine non-trivial divisors.
uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
- SmallVector<ValuePtr, 4> operands;
+ SmallVector<Value, 4> operands;
AffineMap map;
buildTripCountMapAndOperands(forOp, &map, &operands);
@@ -164,7 +164,7 @@ uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
///
/// Returns false in cases with more than one AffineApplyOp, this is
/// conservative.
-static bool isAccessIndexInvariant(ValuePtr iv, ValuePtr index) {
+static bool isAccessIndexInvariant(Value iv, Value index) {
assert(isForInductionVar(iv) && "iv must be a AffineForOp");
assert(index->getType().isa<IndexType>() && "index must be of IndexType");
SmallVector<Operation *, 4> affineApplyOps;
@@ -188,9 +188,8 @@ static bool isAccessIndexInvariant(ValuePtr iv, ValuePtr index) {
return !(AffineValueMap(composeOp).isFunctionOf(0, iv));
}
-DenseSet<ValuePtr> mlir::getInvariantAccesses(ValuePtr iv,
- ArrayRef<ValuePtr> indices) {
- DenseSet<ValuePtr> res;
+DenseSet<Value> mlir::getInvariantAccesses(Value iv, ArrayRef<Value> indices) {
+ DenseSet<Value> res;
for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) {
auto val = indices[idx];
if (isAccessIndexInvariant(iv, val)) {
@@ -220,7 +219,7 @@ DenseSet<ValuePtr> mlir::getInvariantAccesses(ValuePtr iv,
///
// TODO(ntv): check strides.
template <typename LoadOrStoreOp>
-static bool isContiguousAccess(ValuePtr iv, LoadOrStoreOp memoryOp,
+static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
int *memRefDim) {
static_assert(std::is_same<LoadOrStoreOp, AffineLoadOp>::value ||
std::is_same<LoadOrStoreOp, AffineStoreOp>::value,
@@ -241,11 +240,11 @@ static bool isContiguousAccess(ValuePtr iv, LoadOrStoreOp memoryOp,
int uniqueVaryingIndexAlongIv = -1;
auto accessMap = memoryOp.getAffineMap();
- SmallVector<ValuePtr, 4> mapOperands(memoryOp.getMapOperands());
+ SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
unsigned numDims = accessMap.getNumDims();
for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
// Gather map operands used result expr 'i' in 'exprOperands'.
- SmallVector<ValuePtr, 4> exprOperands;
+ SmallVector<Value, 4> exprOperands;
auto resultExpr = accessMap.getResult(i);
resultExpr.walk([&](AffineExpr expr) {
if (auto dimExpr = expr.dyn_cast<AffineDimExpr>())
@@ -373,7 +372,7 @@ bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
// Validate the results of this operation if it were to be shifted.
for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
- ValuePtr result = op.getResult(i);
+ Value result = op.getResult(i);
for (auto *user : result->getUsers()) {
// If an ancestor operation doesn't lie in the block of forOp,
// there is no shift to check.
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 0e7d10e78cf..8ddf2e274eb 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -51,7 +51,7 @@ ComputationSliceState::getAsConstraints(FlatAffineConstraints *cst) {
// Adds operands (dst ivs and symbols) as symbols in 'cst'.
unsigned numSymbols = lbOperands[0].size();
- SmallVector<ValuePtr, 4> values(ivs);
+ SmallVector<Value, 4> values(ivs);
// Append 'ivs' then 'operands' to 'values'.
values.append(lbOperands[0].begin(), lbOperands[0].end());
cst->reset(numDims, numSymbols, 0, values);
@@ -176,7 +176,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
if (rank == 0) {
SmallVector<AffineForOp, 4> ivs;
getLoopIVs(*op, &ivs);
- SmallVector<ValuePtr, 8> regionSymbols;
+ SmallVector<Value, 8> regionSymbols;
extractForInductionVars(ivs, &regionSymbols);
// A rank 0 memref has a 0-d region.
cst.reset(rank, loopDepth, 0, regionSymbols);
@@ -192,7 +192,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
unsigned numSymbols = accessMap.getNumSymbols();
unsigned numOperands = accessValueMap.getNumOperands();
// Merge operands with slice operands.
- SmallVector<ValuePtr, 4> operands;
+ SmallVector<Value, 4> operands;
operands.resize(numOperands);
for (unsigned i = 0; i < numOperands; ++i)
operands[i] = accessValueMap.getOperand(i);
@@ -269,7 +269,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
getLoopIVs(*op, &enclosingIVs);
assert(loopDepth <= enclosingIVs.size() && "invalid loop depth");
enclosingIVs.resize(loopDepth);
- SmallVector<ValuePtr, 4> ids;
+ SmallVector<Value, 4> ids;
cst.getIdValues(cst.getNumDimIds(), cst.getNumDimAndSymbolIds(), &ids);
for (auto id : ids) {
AffineForOp iv;
@@ -336,9 +336,9 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
// Indices to use for the DmaStart op.
// Indices for the original memref being DMAed from/to.
- SmallVector<ValuePtr, 4> memIndices;
+ SmallVector<Value, 4> memIndices;
// Indices for the faster buffer being DMAed into/from.
- SmallVector<ValuePtr, 4> bufIndices;
+ SmallVector<Value, 4> bufIndices;
// Compute the extents of the buffer.
Optional<int64_t> numElements = getConstantBoundingSizeAndShape();
@@ -471,7 +471,7 @@ static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
}
// Adds loop IV bounds to 'cst' for loop IVs not found in 'ivs'.
-LogicalResult addMissingLoopIVBounds(SmallPtrSet<ValuePtr, 8> &ivs,
+LogicalResult addMissingLoopIVBounds(SmallPtrSet<Value, 8> &ivs,
FlatAffineConstraints *cst) {
for (unsigned i = 0, e = cst->getNumDimIds(); i < e; ++i) {
auto value = cst->getIdValue(i);
@@ -587,10 +587,10 @@ LogicalResult mlir::computeSliceUnion(ArrayRef<Operation *> opsA,
// Pre-constraint id alignment: record loop IVs used in each constraint
// system.
- SmallPtrSet<ValuePtr, 8> sliceUnionIVs;
+ SmallPtrSet<Value, 8> sliceUnionIVs;
for (unsigned k = 0, l = sliceUnionCst.getNumDimIds(); k < l; ++k)
sliceUnionIVs.insert(sliceUnionCst.getIdValue(k));
- SmallPtrSet<ValuePtr, 8> tmpSliceIVs;
+ SmallPtrSet<Value, 8> tmpSliceIVs;
for (unsigned k = 0, l = tmpSliceCst.getNumDimIds(); k < l; ++k)
tmpSliceIVs.insert(tmpSliceCst.getIdValue(k));
@@ -650,7 +650,7 @@ LogicalResult mlir::computeSliceUnion(ArrayRef<Operation *> opsA,
&sliceUnion->ubs);
// Add slice bound operands of union.
- SmallVector<ValuePtr, 4> sliceBoundOperands;
+ SmallVector<Value, 4> sliceBoundOperands;
sliceUnionCst.getIdValues(numSliceLoopIVs,
sliceUnionCst.getNumDimAndSymbolIds(),
&sliceBoundOperands);
@@ -716,7 +716,7 @@ void mlir::getComputationSliceState(
&sliceState->lbs, &sliceState->ubs);
// Set up bound operands for the slice's lower and upper bounds.
- SmallVector<ValuePtr, 4> sliceBoundOperands;
+ SmallVector<Value, 4> sliceBoundOperands;
unsigned numDimsAndSymbols = dependenceConstraints->getNumDimAndSymbolIds();
for (unsigned i = 0; i < numDimsAndSymbols; ++i) {
if (i < offset || i >= offset + numSliceLoopIVs) {
@@ -734,7 +734,7 @@ void mlir::getComputationSliceState(
isBackwardSlice ? dstLoopIVs[loopDepth - 1].getBody()->begin()
: std::prev(srcLoopIVs[loopDepth - 1].getBody()->end());
- llvm::SmallDenseSet<ValuePtr, 8> sequentialLoops;
+ llvm::SmallDenseSet<Value, 8> sequentialLoops;
if (isa<AffineLoadOp>(depSourceOp) && isa<AffineLoadOp>(depSinkOp)) {
// For read-read access pairs, clear any slice bounds on sequential loops.
// Get sequential loops in loop nest rooted at 'srcLoopIVs[0]'.
@@ -749,7 +749,7 @@ void mlir::getComputationSliceState(
return isBackwardSlice ? srcLoopIVs[i] : dstLoopIVs[i];
};
for (unsigned i = 0; i < numSliceLoopIVs; ++i) {
- ValuePtr iv = getSliceLoop(i).getInductionVar();
+ Value iv = getSliceLoop(i).getInductionVar();
if (sequentialLoops.count(iv) == 0 &&
getSliceLoop(i).getAttr(kSliceFusionBarrierAttrName) == nullptr)
continue;
@@ -910,7 +910,7 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,
Block::iterator start,
Block::iterator end,
int memorySpace) {
- SmallDenseMap<ValuePtr, std::unique_ptr<MemRefRegion>, 4> regions;
+ SmallDenseMap<Value, std::unique_ptr<MemRefRegion>, 4> regions;
// Walk this 'affine.for' operation to gather all memory regions.
auto result = block.walk(start, end, [&](Operation *opInst) -> WalkResult {
@@ -960,8 +960,8 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
/// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
/// at 'forOp'.
-void mlir::getSequentialLoops(
- AffineForOp forOp, llvm::SmallDenseSet<ValuePtr, 8> *sequentialLoops) {
+void mlir::getSequentialLoops(AffineForOp forOp,
+ llvm::SmallDenseSet<Value, 8> *sequentialLoops) {
forOp.getOperation()->walk([&](Operation *op) {
if (auto innerFor = dyn_cast<AffineForOp>(op))
if (!isLoopParallel(innerFor))
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index cd77eff9e40..1c7dbed5fac 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -100,7 +100,7 @@ Optional<SmallVector<int64_t, 4>> mlir::shapeRatio(VectorType superVectorType,
/// Examples can be found in the documentation of `makePermutationMap`, in the
/// header file.
static AffineMap makePermutationMap(
- ArrayRef<ValuePtr> indices,
+ ArrayRef<Value> indices,
const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim) {
if (enclosingLoopToVectorDim.empty())
return AffineMap();
@@ -158,7 +158,7 @@ static SetVector<Operation *> getEnclosingforOps(Operation *op) {
}
AffineMap mlir::makePermutationMap(
- Operation *op, ArrayRef<ValuePtr> indices,
+ Operation *op, ArrayRef<Value> indices,
const DenseMap<Operation *, unsigned> &loopToVectorDim) {
DenseMap<Operation *, unsigned> enclosingLoopToVectorDim;
auto enclosingLoops = getEnclosingforOps(op);
OpenPOWER on IntegriCloud