summary | refs | log | tree | commit | diff | stats
path: root/mlir/lib/Transforms
diff options
context:
space:
mode:
Diffstat (limited to 'mlir/lib/Transforms')
-rw-r--r--  mlir/lib/Transforms/DmaGeneration.cpp                    6
-rw-r--r--  mlir/lib/Transforms/LoopFusion.cpp                       2
-rw-r--r--  mlir/lib/Transforms/LowerVectorTransfers.cpp             8
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp               4
-rw-r--r--  mlir/lib/Transforms/PipelineDataTransfer.cpp             4
-rw-r--r--  mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp 4
-rw-r--r--  mlir/lib/Transforms/Vectorize.cpp                        5
7 files changed, 17 insertions, 16 deletions
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 8b86056c8a9..e9d66ef74c3 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -114,7 +114,7 @@ struct StrideInfo {
/// successively nested.
// TODO(bondhugula): make this work with non-identity layout maps.
static void getMultiLevelStrides(const MemRefRegion &region,
- ArrayRef<int> bufferShape,
+ ArrayRef<int64_t> bufferShape,
SmallVectorImpl<StrideInfo> *strideInfos) {
if (bufferShape.size() <= 1)
return;
@@ -122,7 +122,7 @@ static void getMultiLevelStrides(const MemRefRegion &region,
int64_t numEltPerStride = 1;
int64_t stride = 1;
for (int d = bufferShape.size() - 1; d >= 1; d--) {
- int dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
+ int64_t dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
stride *= dimSize;
numEltPerStride *= bufferShape[d];
// A stride is needed only if the region has a shorter extent than the
@@ -169,7 +169,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, ForInst *forInst,
Value *zeroIndex = top.create<ConstantIndexOp>(loc, 0);
unsigned rank = memRefType.getRank();
- SmallVector<int, 4> fastBufferShape;
+ SmallVector<int64_t, 4> fastBufferShape;
// Compute the extents of the buffer.
std::vector<SmallVector<int64_t, 4>> lbs;
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 94d763fcbd1..24914878656 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -711,7 +711,7 @@ static Value *createPrivateMemRef(ForInst *forInst,
// Compute MemRefRegion for 'srcStoreOpInst' at depth 'dstLoopDepth'.
MemRefRegion region;
getMemRefRegion(srcStoreOpInst, dstLoopDepth, &region);
- SmallVector<int, 4> newShape;
+ SmallVector<int64_t, 4> newShape;
std::vector<SmallVector<int64_t, 4>> lbs;
SmallVector<int64_t, 8> lbDivisors;
lbs.reserve(rank);
diff --git a/mlir/lib/Transforms/LowerVectorTransfers.cpp b/mlir/lib/Transforms/LowerVectorTransfers.cpp
index ccda1385df4..19208d4c268 100644
--- a/mlir/lib/Transforms/LowerVectorTransfers.cpp
+++ b/mlir/lib/Transforms/LowerVectorTransfers.cpp
@@ -96,9 +96,9 @@ private:
MLFuncGlobalLoweringState *state;
MemRefType memrefType;
- ArrayRef<int> memrefShape;
+ ArrayRef<int64_t> memrefShape;
VectorType vectorType;
- ArrayRef<int> vectorShape;
+ ArrayRef<int64_t> vectorShape;
AffineMap permutationMap;
/// Used for staging the transfer in a local scalar buffer.
@@ -232,9 +232,9 @@ VectorTransferRewriter<VectorTransferOpTy>::makeVectorTransferAccessInfo() {
}
emitter
.template bindZipRangeConstants<ConstantIndexOp>(
- llvm::zip(lbs, SmallVector<int, 8>(ivs.size(), 0)))
+ llvm::zip(lbs, SmallVector<int64_t, 8>(ivs.size(), 0)))
.template bindZipRangeConstants<ConstantIndexOp>(
- llvm::zip(steps, SmallVector<int, 8>(ivs.size(), 1)));
+ llvm::zip(steps, SmallVector<int64_t, 8>(ivs.size(), 1)));
return VectorTransferAccessInfo{ivs,
makeExprs(lbs),
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 6085edd8e8e..e82390f8db9 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -187,7 +187,7 @@ struct MaterializationState {
MaterializationState() : hwVectorSize(clVectorSize.size(), 0) {
std::copy(clVectorSize.begin(), clVectorSize.end(), hwVectorSize.begin());
}
- SmallVector<int, 8> hwVectorSize;
+ SmallVector<int64_t, 8> hwVectorSize;
VectorType superVectorType;
VectorType hwVectorType;
SmallVector<unsigned, 8> hwVectorInstance;
@@ -458,7 +458,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy *transfer,
SmallVector<AffineExpr, 4> keep;
MLIRContext *context = transfer->getInstruction()->getContext();
functional::zipApply(
- [&dim, &keep, context](int shape, int ratio) {
+ [&dim, &keep, context](int64_t shape, int64_t ratio) {
assert(shape >= ratio && "shape dim must be greater than ratio dim");
if (shape != ratio) {
// HW vector is not full instantiated along this dim, keep it.
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 989af0071d7..9e7c928070f 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -87,8 +87,8 @@ static bool doubleBuffer(Value *oldMemRef, ForInst *forInst) {
// Doubles the shape with a leading dimension extent of 2.
auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
// Add the leading dimension in the shape for the double buffer.
- ArrayRef<int> oldShape = oldMemRefType.getShape();
- SmallVector<int, 4> newShape(1 + oldMemRefType.getRank());
+ ArrayRef<int64_t> oldShape = oldMemRefType.getShape();
+ SmallVector<int64_t, 4> newShape(1 + oldMemRefType.getRank());
newShape[0] = 2;
std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
auto newMemRefType =
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 7e5cac0d87c..ad966e8d280 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -101,8 +101,8 @@ char VectorizerTestPass::passID = 0;
void VectorizerTestPass::testVectorShapeRatio(Function *f) {
using matcher::Op;
- SmallVector<int, 8> shape(clTestVectorShapeRatio.begin(),
- clTestVectorShapeRatio.end());
+ SmallVector<int64_t, 8> shape(clTestVectorShapeRatio.begin(),
+ clTestVectorShapeRatio.end());
auto subVectorType = VectorType::get(shape, Type::getF32(f->getContext()));
// Only filter instructions that operate on a strict super-vector and have one
// return. This makes testing easier.
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 58bb3901947..8a6d965ce0d 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -667,7 +667,7 @@ char Vectorize::passID = 0;
namespace {
struct VectorizationStrategy {
- ArrayRef<int> vectorSizes;
+ SmallVector<int64_t, 8> vectorSizes;
DenseMap<ForInst *, unsigned> loopToVectorDim;
};
@@ -1280,7 +1280,8 @@ PassResult Vectorize::runOnFunction(Function *f) {
for (auto m : matches) {
VectorizationStrategy strategy;
// TODO(ntv): depending on profitability, elect to reduce the vector size.
- strategy.vectorSizes = clVirtualVectorSize;
+ strategy.vectorSizes.assign(clVirtualVectorSize.begin(),
+ clVirtualVectorSize.end());
auto fail = analyzeProfitability(m.second, 1, patternDepth, &strategy);
if (fail) {
continue;
OpenPOWER on IntegriCloud