path: root/llvm/lib/Analysis/LoopAccessAnalysis.cpp
author    Hans Wennborg <hans@hanshq.net>  2017-10-03 18:32:29 +0000
committer Hans Wennborg <hans@hanshq.net>  2017-10-03 18:32:29 +0000
commit    9a9048e19fc8cfa1c3a036891a4bb137dd6b771d (patch)
tree      2917a4b586b6fba28911506400be4cbf4c8ecd01 /llvm/lib/Analysis/LoopAccessAnalysis.cpp
parent    494cfd48ef74b92ed2d52ffde59bb41cce65489a (diff)
Revert r314806 "[SLP] Vectorize jumbled memory loads."
All the buildbots are red, e.g.
http://lab.llvm.org:8011/builders/clang-cmake-aarch64-lld/builds/2436/

> Summary:
> This patch tries to vectorize loads of consecutive memory accesses, accessed
> in a non-consecutive or jumbled way. An earlier attempt was made with patch
> D26905, which was reverted due to a basic issue with representing the
> 'use mask' of jumbled accesses.
>
> This patch fixes the mask representation by recording the 'use mask' in the
> usertree entry.
>
> Change-Id: I9fe7f5045f065d84c126fa307ef6ebe0787296df
>
> Reviewers: mkuper, loladiro, Ayal, zvi, danielcdh
>
> Reviewed By: Ayal
>
> Subscribers: hans, mzolotukhin
>
> Differential Revision: https://reviews.llvm.org/D36130

llvm-svn: 314824
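To make the reverted mechanism easier to follow, here is a minimal standalone sketch of the sort-and-mask step that the deleted sortLoadAccesses helper (visible in the diff below) performed. It is plain C++ with illustrative names (sortJumbledLoads and SortResult are hypothetical, not the LLVM API), and the SCEV-based offset computation is replaced by ready-made byte offsets:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical standalone model of the reverted sortLoadAccesses logic:
// given the byte offset of each load in a group, compute the ascending
// order of the loads and the "use mask" that maps each original load to
// its slot in the sorted sequence.
struct SortResult {
  std::vector<unsigned> SortedIdx; // SortedIdx[i] = original index of the
                                   // i-th load in ascending-offset order.
  std::vector<unsigned> Mask;      // Mask[i] = sorted position of the load
                                   // that was originally at index i.
};

SortResult sortJumbledLoads(const std::vector<int64_t> &Offsets) {
  SortResult R;
  R.SortedIdx.resize(Offsets.size());
  for (unsigned i = 0; i < Offsets.size(); ++i)
    R.SortedIdx[i] = i;

  // Sort indices by offset; this mirrors the std::sort over UseOrder in
  // the removed code.
  std::sort(R.SortedIdx.begin(), R.SortedIdx.end(),
            [&Offsets](unsigned L, unsigned Rt) {
              return Offsets[L] < Offsets[Rt];
            });

  // The mask is the inverse permutation of SortedIdx, which is what the
  // removed code computed by re-sorting an identity vector by UseOrder.
  R.Mask.resize(Offsets.size());
  for (unsigned i = 0; i < Offsets.size(); ++i)
    R.Mask[R.SortedIdx[i]] = i;
  return R;
}

For offsets {8, 0, 4}, SortedIdx comes out {1, 2, 0} and Mask {2, 0, 1}: the load originally at index 0 lands in sorted slot 2, so a shuffle with that mask over the sorted loads restores the original use order, which is what recording the mask in the usertree entry enables.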
Diffstat (limited to 'llvm/lib/Analysis/LoopAccessAnalysis.cpp')
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp | 71 ----------------------
1 file changed, 0 insertions(+), 71 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 359e0efbfc1..eb633196d33 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1107,77 +1107,6 @@ static unsigned getAddressSpaceOperand(Value *I) {
   return -1;
 }
 
-// TODO:This API can be improved by using the permutation of given width as the
-// accesses are entered into the map.
-bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
-                            ScalarEvolution &SE,
-                            SmallVectorImpl<Value *> &Sorted,
-                            SmallVectorImpl<unsigned> *Mask) {
-  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
-  OffValPairs.reserve(VL.size());
-  Sorted.reserve(VL.size());
-
-  // Walk over the pointers, and map each of them to an offset relative to
-  // first pointer in the array.
-  Value *Ptr0 = getPointerOperand(VL[0]);
-  const SCEV *Scev0 = SE.getSCEV(Ptr0);
-  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
-  PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
-  uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
-
-  for (auto *Val : VL) {
-    // The only kind of access we care about here is load.
-    if (!isa<LoadInst>(Val))
-      return false;
-
-    Value *Ptr = getPointerOperand(Val);
-    assert(Ptr && "Expected value to have a pointer operand.");
-    // If a pointer refers to a different underlying object, bail - the
-    // pointers are by definition incomparable.
-    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
-    if (CurrObj != Obj0)
-      return false;
-
-    const SCEVConstant *Diff =
-        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
-    // The pointers may not have a constant offset from each other, or SCEV
-    // may just not be smart enough to figure out they do. Regardless,
-    // there's nothing we can do.
-    if (!Diff || static_cast<unsigned>(Diff->getAPInt().abs().getSExtValue()) >
-                     (VL.size() - 1) * Size)
-      return false;
-
-    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
-  }
-  SmallVector<unsigned, 4> UseOrder(VL.size());
-  for (unsigned i = 0; i < VL.size(); i++) {
-    UseOrder[i] = i;
-  }
-
-  // Sort the memory accesses and keep the order of their uses in UseOrder.
-  std::sort(UseOrder.begin(), UseOrder.end(),
-            [&OffValPairs](unsigned Left, unsigned Right) {
-              return OffValPairs[Left].first < OffValPairs[Right].first;
-            });
-
-  for (unsigned i = 0; i < VL.size(); i++)
-    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
-
-  // Sort UseOrder to compute the Mask.
-  if (Mask) {
-    Mask->reserve(VL.size());
-    for (unsigned i = 0; i < VL.size(); i++)
-      Mask->emplace_back(i);
-    std::sort(Mask->begin(), Mask->end(),
-              [&UseOrder](unsigned Left, unsigned Right) {
-                return UseOrder[Left] < UseOrder[Right];
-              });
-  }
-
-  return true;
-}
-
-
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
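The surviving context lines introduce isConsecutiveAccess, whose documented contract the removed sorting helper complemented. As a rough, hedged illustration of just that contract (a hypothetical standalone function, not the LLVM implementation, which reasons about SCEV expressions rather than raw byte offsets):

#include <cstdint>

// Hypothetical illustration of the contract documented above: access B
// is consecutive to access A when B begins exactly where A ends, i.e.
// at A's byte offset plus A's allocation size.
bool consecutiveByOffset(int64_t OffA, int64_t OffB, uint64_t SizeA) {
  return OffB - OffA == static_cast<int64_t>(SizeA);
}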