author    Florian Hahn <florian.hahn@arm.com>  2018-11-13 15:58:18 +0000
committer Florian Hahn <florian.hahn@arm.com>  2018-11-13 15:58:18 +0000
commit    a4dc7feeea371166cc24dc617da59b2302dd2410 (patch)
tree      712238e4b06f2c2890aa1d49d02c0d6d0304a5d6 /llvm/lib/Analysis/VectorUtils.cpp
parent    de6480a38c4e6ab1e7c0c11fc7be87a4742d2d45 (diff)
[VPlan] VPlan version of InterleavedAccessInfo.
This patch turns InterleaveGroup into a template, with the instruction type as a template parameter. It also adds a VPInterleavedAccessInfo class, which only contains a mapping from VPInstructions to their respective InterleaveGroup. As we do not have access to scalar evolution in VPlan, we instead re-use the result of the earlier InterleavedAccessInfo analysis and convert it to a VPInterleavedAccessInfo.

Reviewers: Ayal, mssimpso, hfinkel, dcaballe, rengolin, mkuper, hsaito

Reviewed By: rengolin

Differential Revision: https://reviews.llvm.org/D49489

llvm-svn: 346758
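As described above, VPInterleavedAccessInfo is essentially a thin wrapper around a map from instructions to their interleave group. A minimal sketch of that shape, using hypothetical stand-in names rather than the patch's actual classes (which live in the VPlan sources):

#include <unordered_map>

template <typename InstT> class InterleaveGroupSketch { /* members omitted */ };
class VPInstructionSketch { /* stand-in for VPInstruction */ };

// One lookup structure per plan: instruction -> its interleave group.
class VPInterleavedAccessInfoSketch {
  std::unordered_map<VPInstructionSketch *,
                     InterleaveGroupSketch<VPInstructionSketch> *>
      InterleaveGroupMap;

public:
  // Return the group the instruction belongs to, or nullptr if none.
  InterleaveGroupSketch<VPInstructionSketch> *
  getInterleaveGroup(VPInstructionSketch *I) const {
    auto It = InterleaveGroupMap.find(I);
    return It == InterleaveGroupMap.end() ? nullptr : It->second;
  }
};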
Diffstat (limited to 'llvm/lib/Analysis/VectorUtils.cpp')
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp | 34
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 2cf4baf7af6..9c3bf7df89b 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -505,8 +505,9 @@ Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
return Inst;
}
-Constant *llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
- const InterleaveGroup &Group) {
+Constant *
+llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
+ const InterleaveGroup<Instruction> &Group) {
// All 1's means mask is not needed.
if (Group.getNumMembers() == Group.getFactor())
return nullptr;
@@ -720,9 +721,9 @@ void InterleavedAccessInfo::analyzeInterleaving(
collectDependences();
// Holds all interleaved store groups temporarily.
- SmallSetVector<InterleaveGroup *, 4> StoreGroups;
+ SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
// Holds all interleaved load groups temporarily.
- SmallSetVector<InterleaveGroup *, 4> LoadGroups;
+ SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
// Search in bottom-up program order for pairs of accesses (A and B) that can
// form interleaved load or store groups. In the algorithm below, access A
@@ -744,7 +745,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// Initialize a group for B if it has an allowable stride. Even if we don't
// create a group for B, we continue with the bottom-up algorithm to ensure
// we don't break any of B's dependences.
- InterleaveGroup *Group = nullptr;
+ InterleaveGroup<Instruction> *Group = nullptr;
if (isStrided(DesB.Stride) &&
(!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
Group = getInterleaveGroup(B);
@@ -789,7 +790,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// illegal code motion. A will then be free to form another group with
// instructions that precede it.
if (isInterleaved(A)) {
- InterleaveGroup *StoreGroup = getInterleaveGroup(A);
+ InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);
StoreGroups.remove(StoreGroup);
releaseGroup(StoreGroup);
}
@@ -868,7 +869,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
} // Iteration over B accesses.
// Remove interleaved store groups with gaps.
- for (InterleaveGroup *Group : StoreGroups)
+ for (auto *Group : StoreGroups)
if (Group->getNumMembers() != Group->getFactor()) {
LLVM_DEBUG(
dbgs() << "LV: Invalidate candidate interleaved store group due "
@@ -889,7 +890,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// This means that we can forcefully peel the loop in order to only have to
// check the first pointer for no-wrap. When we'll change to use Assume=true
// we'll only need at most one runtime check per interleaved group.
- for (InterleaveGroup *Group : LoadGroups) {
+ for (auto *Group : LoadGroups) {
// Case 1: A full group. Can Skip the checks; For full groups, if the wide
// load would wrap around the address space we would do a memory access at
// nullptr even without the transformation.
@@ -947,9 +948,9 @@ void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
return;
// Avoid releasing a Group twice.
- SmallPtrSet<InterleaveGroup *, 4> DelSet;
+ SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
for (auto &I : InterleaveGroupMap) {
- InterleaveGroup *Group = I.second;
+ InterleaveGroup<Instruction> *Group = I.second;
if (Group->requiresScalarEpilogue())
DelSet.insert(Group);
}
@@ -964,3 +965,16 @@ void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
RequiresScalarEpilogue = false;
}
+
+template <>
+void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
+ SmallVector<Value *, 4> VL;
+ std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
+ [](std::pair<int, Instruction *> p) { return p.second; });
+ propagateMetadata(NewInst, VL);
+}
+
+template <typename InstT>
+void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
+ llvm_unreachable("addMetadata can only be used for Instruction");
+}
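The final hunk relies on explicit specialization of a class-template member function: the generic InterleaveGroup<InstT>::addMetadata definition is unreachable, and only the InterleaveGroup<Instruction> specialization propagates metadata, since metadata exists only on IR instructions. A self-contained sketch of the same pattern, with stand-in types rather than LLVM's real classes (assert substitutes for llvm_unreachable):

#include <cassert>

struct IRInst {}; // stand-in for llvm::Instruction
struct VPInst {}; // stand-in for a VPlan instruction

template <typename InstT> class GroupSketch {
public:
  void addMetadata(InstT *NewInst) const;
};

// Generic definition: calling this for any non-IR instruction type is a bug.
template <typename InstT>
void GroupSketch<InstT>::addMetadata(InstT *) const {
  assert(false && "addMetadata can only be used for IR instructions");
}

// Explicit specialization: IR instructions actually carry metadata. The real
// code collects the group's members into a vector and calls propagateMetadata.
template <> void GroupSketch<IRInst>::addMetadata(IRInst *NewInst) const {
  (void)NewInst;
}

This keeps a single templated interface for both the IR and VPlan pipelines while confining metadata propagation, which is meaningful only for IR, to the Instruction instantiation.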