Diffstat (limited to 'llvm/lib/Analysis/LoopAccessAnalysis.cpp'):
 llvm/lib/Analysis/LoopAccessAnalysis.cpp | 71 ++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index eb633196d33..359e0efbfc1 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1107,6 +1107,77 @@ static unsigned getAddressSpaceOperand(Value *I) {
  return -1;
}
+// TODO: This API can be improved by using the permutation of a given width
+// as the accesses are entered into the map.
+bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
+                            ScalarEvolution &SE,
+                            SmallVectorImpl<Value *> &Sorted,
+                            SmallVectorImpl<unsigned> *Mask) {
+  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
+  OffValPairs.reserve(VL.size());
+  Sorted.reserve(VL.size());
+
+  // Walk over the pointers, and map each of them to an offset relative to
+  // the first pointer in the array.
+  Value *Ptr0 = getPointerOperand(VL[0]);
+  const SCEV *Scev0 = SE.getSCEV(Ptr0);
+  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
+  PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
+  uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
+
+  for (auto *Val : VL) {
+    // The only kind of access we care about here is load.
+    if (!isa<LoadInst>(Val))
+      return false;
+
+    Value *Ptr = getPointerOperand(Val);
+    assert(Ptr && "Expected value to have a pointer operand.");
+    // If a pointer refers to a different underlying object, bail - the
+    // pointers are by definition incomparable.
+    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
+    if (CurrObj != Obj0)
+      return false;
+
+    const SCEVConstant *Diff =
+        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
+    // The pointers may not have a constant offset from each other, or SCEV
+    // may just not be smart enough to figure out they do. Regardless,
+    // there's nothing we can do.
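+    // Also give up if the offset from the first pointer exceeds
+    // (VL.size() - 1) * Size: such a load cannot belong to a single run of
+    // VL.size() consecutive elements.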
+    if (!Diff || static_cast<unsigned>(Diff->getAPInt().abs().getSExtValue()) >
+                 (VL.size() - 1) * Size)
+      return false;
+
+    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
+  }
+  SmallVector<unsigned, 4> UseOrder(VL.size());
+  for (unsigned i = 0; i < VL.size(); i++) {
+    UseOrder[i] = i;
+  }
+
+  // Sort the indices in UseOrder by the offset of the corresponding access.
+  std::sort(UseOrder.begin(), UseOrder.end(),
+            [&OffValPairs](unsigned Left, unsigned Right) {
+              return OffValPairs[Left].first < OffValPairs[Right].first;
+            });
+
+  for (unsigned i = 0; i < VL.size(); i++)
+    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
+
+  // Compute Mask as the inverse permutation of UseOrder: Mask[i] is the
+  // position of VL[i] in Sorted.
+  if (Mask) {
+    Mask->reserve(VL.size());
+    for (unsigned i = 0; i < VL.size(); i++)
+      Mask->emplace_back(i);
+    std::sort(Mask->begin(), Mask->end(),
+              [&UseOrder](unsigned Left, unsigned Right) {
+                return UseOrder[Left] < UseOrder[Right];
+              });
+  }
+
+  return true;
+}
+
+
/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
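
A rough usage sketch of the new API (not part of this patch; the Loads vector
and the DL/SE analyses are assumed to already exist in the caller, e.g. a
vectorizer checking whether a bundle of loads is a shuffled consecutive
sequence):

  // Hypothetical caller: Loads, DL and SE are assumed to be in scope.
  SmallVector<Value *, 8> SortedLoads;
  SmallVector<unsigned, 8> ShuffleMask;
  if (llvm::sortLoadAccesses(Loads, DL, SE, SortedLoads, &ShuffleMask)) {
    // SortedLoads holds the same loads in increasing address order, and
    // ShuffleMask[i] is the position of Loads[i] within SortedLoads, i.e.
    // SortedLoads[ShuffleMask[i]] == Loads[i] for every i.
  }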