author    Michael Kuperstein <mkuper@google.com>  2016-07-22 21:28:48 +0000
committer Michael Kuperstein <mkuper@google.com>  2016-07-22 21:28:48 +0000
commit    38e72980936c94adc0a79db9225c5aa99cf42784 (patch)
tree      4e8563f736cfabec0b1023bac9e372d0ccb66e8d /llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
parent    cbc4377af18eec1e4e3b51d18fa92da404533d24 (diff)
[SLPVectorizer] Vectorize reverse-order loads in horizontal reductions
When vectorizing a tree rooted at a store bundle, we currently try to sort the stores before building the tree, so that the stores can be vectorized. For other trees, the order of the root bundle - which determines the order of all other bundles - is arbitrary. That is bad, since if a leaf bundle of consecutive loads happens to appear in the wrong order, we will not vectorize it.

This is partially mitigated when the root is a binary operator, by trying to build a "reversed" tree when that's considered profitable. This patch extends the workaround we have for binops to trees rooted in a horizontal reduction.

This fixes PR28474.

Differential Revision: https://reviews.llvm.org/D22554

llvm-svn: 276477
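As an illustration (not part of the commit message), a PR28474-style reproducer might look like the sketch below; the function name is hypothetical. The reduction tree is rooted at the adds, so before this patch the root bundle's order was arbitrary, and the leaf bundle of loads, being consecutive only in reverse order, would be gathered as scalars rather than vectorized.

// Hypothetical reproducer: a horizontal add reduction whose leaf loads
// appear in reverse address order (a[3], a[2], a[1], a[0]).
int sum4_reversed(const int *a) {
  return a[3] + a[2] + a[1] + a[0];
}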
Diffstat (limited to 'llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp')
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 69
1 file changed, 53 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 8a3c4d14fec..1dcd33f8ee7 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -878,10 +878,10 @@ private:
/// List of users to ignore during scheduling and that don't need extracting.
ArrayRef<Value *> UserIgnoreList;
- // Number of load-bundles, which contain consecutive loads.
+ // Number of load bundles that contain consecutive loads.
int NumLoadsWantToKeepOrder;
- // Number of load-bundles of size 2, which are consecutive loads if reversed.
+ // Number of load bundles that contain consecutive loads in reversed order.
int NumLoadsWantToChangeOrder;
// Analysis and block reference.
@@ -1154,7 +1154,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
return;
}
- // Check if the loads are consecutive or of we need to swizzle them.
+
+ // Make sure all loads in the bundle are simple - we can't vectorize
+ // atomic or volatile loads.
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
LoadInst *L = cast<LoadInst>(VL[i]);
if (!L->isSimple()) {
@@ -1163,20 +1165,47 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
return;
}
+ }
+ // Check if the loads are consecutive, reversed, or neither.
+ // TODO: What we really want is to sort the loads, but for now, check
+ // the two likely directions.
+ bool Consecutive = true;
+ bool ReverseConsecutive = true;
+ for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
- if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
- ++NumLoadsWantToChangeOrder;
- }
- BS.cancelScheduling(VL);
- newTreeEntry(VL, false);
- DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
- return;
+ Consecutive = false;
+ break;
+ } else {
+ ReverseConsecutive = false;
}
}
- ++NumLoadsWantToKeepOrder;
- newTreeEntry(VL, true);
- DEBUG(dbgs() << "SLP: added a vector of loads.\n");
+
+ if (Consecutive) {
+ ++NumLoadsWantToKeepOrder;
+ newTreeEntry(VL, true);
+ DEBUG(dbgs() << "SLP: added a vector of loads.\n");
+ return;
+ }
+
+ // If none of the load pairs were consecutive when checked in order,
+ // check the reverse order.
+ if (ReverseConsecutive)
+ for (unsigned i = VL.size() - 1; i > 0; --i)
+ if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
+ ReverseConsecutive = false;
+ break;
+ }
+
+ BS.cancelScheduling(VL);
+ newTreeEntry(VL, false);
+
+ if (ReverseConsecutive) {
+ ++NumLoadsWantToChangeOrder;
+ DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
+ } else {
+ DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+ }
return;
}
case Instruction::ZExt:
@@ -3798,9 +3827,12 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
BuildVectorSlice = BuildVector.slice(i, OpsWidth);
R.buildTree(Ops, BuildVectorSlice);
- // TODO: check if we can allow reordering also for other cases than
- // tryToVectorizePair()
+ // TODO: check if we can allow reordering for more cases.
if (allowReorder && R.shouldReorder()) {
+ // Conceptually, there is nothing actually preventing us from trying to
+ // reorder a larger list. In fact, we do exactly this when vectorizing
+ // reductions. However, at this point, we only expect to get here from
+ // tryToVectorizePair().
assert(Ops.size() == 2);
assert(BuildVectorSlice.empty());
Value *ReorderedOps[] = { Ops[1], Ops[0] };
@@ -4078,7 +4110,12 @@ public:
unsigned i = 0;
for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
- V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
+ auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
+ V.buildTree(VL, ReductionOps);
+ if (V.shouldReorder()) {
+ SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
+ V.buildTree(Reversed, ReductionOps);
+ }
V.computeMinimumValueSizes();
// Estimate cost.
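For reference, the forward/backward classification added to buildTree_rec above can be sketched standalone. This is a simplified illustration, not the LLVM API: integer indices stand in for load addresses, and classifyLoads, isConsecutive, and LoadOrder are made-up names.

#include <cstddef>
#include <vector>

enum class LoadOrder { Consecutive, ReverseConsecutive, Neither };

// Hypothetical stand-in for isConsecutiveAccess(): treat each load as an
// index and consider B consecutive after A when it is the next element.
static bool isConsecutive(int A, int B) { return B == A + 1; }

LoadOrder classifyLoads(const std::vector<int> &VL) {
  if (VL.size() < 2)
    return LoadOrder::Consecutive;
  bool Consecutive = true;
  bool ReverseConsecutive = true;
  for (std::size_t i = 0, e = VL.size() - 1; i < e; ++i) {
    if (!isConsecutive(VL[i], VL[i + 1])) {
      Consecutive = false;
      break;
    }
    // At least one forward pair is consecutive, so a fully reversed
    // bundle is impossible and the reverse scan below can be skipped.
    ReverseConsecutive = false;
  }
  if (Consecutive)
    return LoadOrder::Consecutive;
  // Only reached when the very first forward pair already failed:
  // verify the remaining pairs in reverse order, as the patch does.
  if (ReverseConsecutive)
    for (std::size_t i = VL.size() - 1; i > 0; --i)
      if (!isConsecutive(VL[i], VL[i - 1])) {
        ReverseConsecutive = false;
        break;
      }
  return ReverseConsecutive ? LoadOrder::ReverseConsecutive
                            : LoadOrder::Neither;
}

With this classification, {0, 1, 2, 3} comes back Consecutive, {3, 2, 1, 0} comes back ReverseConsecutive (the case the reduction path now rebuilds from a reversed root bundle), and {0, 2, 1} comes back Neither.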