author     Anna Thomas <anna@azul.com>    2018-09-25 20:57:20 +0000
committer  Anna Thomas <anna@azul.com>    2018-09-25 20:57:20 +0000
commit     b1e3d4531826e6134b05e8ff3c96a5696a72ad50 (patch)
tree       a9176db686066adc9afee51427604eaf8b78aaaa /llvm/lib
parent     c81aff79d3ae0e81b3183e3f077627ea569ad4fc (diff)
[LV][LAA] Vectorize loop invariant values stored into loop invariant address
Summary:
We are overly conservative in the loop vectorizer with respect to stores to
loop invariant addresses. More details in
https://bugs.llvm.org/show_bug.cgi?id=38546. This is the first part of the
fix, where we start with vectorizing loop invariant values stored to loop
invariant addresses. This also includes changes to ORE for stores to an
invariant address.

Reviewers: anemet, Ayal, mkuper, mssimpso

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D50665

llvm-svn: 343028
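As a concrete illustration (not from the patch; names are made up), this is
the kind of loop the change unblocks: both the stored value and the store
address are loop invariant, so the store itself need not vary across
iterations:

    // Hypothetical example. Before this patch the store `*p = k` alone made
    // the loop vectorizer give up; now the loop can be vectorized because
    // both `p` and `k` are loop invariant.
    void f(int *a, int *p, int k, int n) {
      for (int i = 0; i < n; ++i) {
        a[i] += k; // ordinary vectorizable work
        *p = k;    // loop invariant value stored to loop invariant address
      }
    }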
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp                    | 21
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp |  5
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp             | 40
3 files changed, 48 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index a24d66011b8..8312a0d1cff 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1862,10 +1862,21 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
// writes and between reads and writes, but not between reads and reads.
ValueSet Seen;
+ // Record uniform store addresses to identify if we have multiple stores
+ // to the same address.
+ ValueSet UniformStores;
+
for (StoreInst *ST : Stores) {
Value *Ptr = ST->getPointerOperand();
- // Check for store to loop invariant address.
- StoreToLoopInvariantAddress |= isUniform(Ptr);
+
+ if (isUniform(Ptr)) {
+ // Consider multiple stores to the same uniform address as a store of a
+ // variant value.
+ bool MultipleStoresToUniformPtr = !UniformStores.insert(Ptr).second;
+ HasVariantStoreToLoopInvariantAddress |=
+ (!isUniform(ST->getValueOperand()) || MultipleStoresToUniformPtr);
+ }
+
// If we did *not* see this pointer before, insert it to the read-write
// list. At this phase it is only a 'write' list.
if (Seen.insert(Ptr).second) {
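The UniformStores set above encodes a conservative rule: a second store to
the same uniform address makes the final memory contents depend on store
order, so the pair is classified as a variant store even when each stored
value is itself invariant. A hypothetical loop that trips this check (names
are illustrative):

    // Illustrative only. Both stored values are loop invariant, but two
    // stores target the same uniform address, so MultipleStoresToUniformPtr
    // becomes true and HasVariantStoreToLoopInvariantAddress is set.
    void g(int *p, int a, int b, int n) {
      for (int i = 0; i < n; ++i) {
        *p = a; // first store to the uniform address
        *p = b; // second store to the same address
      }
    }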
@@ -2265,7 +2276,7 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
- StoreToLoopInvariantAddress(false) {
+ HasVariantStoreToLoopInvariantAddress(false) {
if (canAnalyzeLoop())
analyzeLoop(AA, LI, TLI, DT);
}
@@ -2297,8 +2308,8 @@ void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
PtrRtChecking->print(OS, Depth);
OS << "\n";
- OS.indent(Depth) << "Store to invariant address was "
- << (StoreToLoopInvariantAddress ? "" : "not ")
+ OS.indent(Depth) << "Variant Store to invariant address was "
+ << (HasVariantStoreToLoopInvariantAddress ? "" : "not ")
<< "found in loop.\n";
OS.indent(Depth) << "SCEV assumptions:\n";
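With the rename, the LAA printer output changes accordingly; running the
analysis printer over a test (typically `opt -loop-accesses -analyze` under
the legacy pass manager; the exact invocation is an assumption, not part of
this commit) now reports, e.g., "Variant Store to invariant address was not
found in loop."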
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 9c81cdc9083..7e11504c0e0 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -817,9 +817,10 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
if (!LAI->canVectorizeMemory())
return false;
- if (LAI->hasStoreToLoopInvariantAddress()) {
+ if (LAI->hasVariantStoreToLoopInvariantAddress()) {
ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
- << "write to a loop invariant address could not be vectorized");
+ << "write of variant value to a loop invariant address could not "
+ "be vectorized");
LLVM_DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
return false;
}
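What still blocks vectorization after this change is a loop variant value
stored to an invariant address, which now produces the reworded remark above.
A hypothetical reproducer:

    // Illustrative only: `p` is loop invariant but `a[i]` changes every
    // iteration, so canVectorizeMemory() still returns false and ORE emits
    // "write of variant value to a loop invariant address could not be
    // vectorized".
    void h(int *p, const int *a, int n) {
      for (int i = 0; i < n; ++i)
        *p = a[i]; // variant value, invariant address
    }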
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index e1795c5e056..1f2aa70a5ff 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1174,8 +1174,11 @@ private:
/// memory access.
unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
- /// The cost calculation for Load instruction \p I with uniform pointer -
- /// scalar load + broadcast.
+ /// The cost calculation for Load/Store instruction \p I with uniform pointer -
+ /// Load: scalar load + broadcast.
+ /// Store: scalar store + (loop invariant value stored? 0 : extract of last
+ /// element)
+ /// TODO: Test the extra cost of the extract when loop variant value stored.
unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
/// Returns whether the instruction is a load or store and will be a emitted
@@ -5297,15 +5300,23 @@ unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
unsigned VF) {
- LoadInst *LI = cast<LoadInst>(I);
- Type *ValTy = LI->getType();
+ Type *ValTy = getMemInstValueType(I);
Type *VectorTy = ToVectorTy(ValTy, VF);
- unsigned Alignment = LI->getAlignment();
- unsigned AS = LI->getPointerAddressSpace();
+ unsigned Alignment = getLoadStoreAlignment(I);
+ unsigned AS = getLoadStoreAddressSpace(I);
+ if (isa<LoadInst>(I)) {
+ return TTI.getAddressComputationCost(ValTy) +
+ TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
+ TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
+ }
+ StoreInst *SI = cast<StoreInst>(I);
+ bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
return TTI.getAddressComputationCost(ValTy) +
- TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
- TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
+ TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) +
+ (isLoopInvariantStoreValue ? 0 : TTI.getVectorInstrCost(
+ Instruction::ExtractElement,
+ VectorTy, VF - 1));
}
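The store branch above charges an ExtractElement only when the stored value
is loop variant: in that case the scalarized store must use the value from
the last vector lane (index VF - 1), whereas an invariant value needs no
extract. A minimal sketch of the resulting formula (hypothetical helper, not
part of the patch):

    // cost = address computation + scalar store
    //        + (invariant stored value ? 0 : extract of lane VF - 1)
    unsigned uniformStoreCost(unsigned AddrCost, unsigned StoreCost,
                              unsigned ExtractCost, bool ValueIsInvariant) {
      return AddrCost + StoreCost + (ValueIsInvariant ? 0 : ExtractCost);
    }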
unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
@@ -5404,15 +5415,22 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
if (!Ptr)
continue;
+ // TODO: We should generate better code and update the cost model for
+ // predicated uniform stores. Today they are treated as any other
+ // predicated store (see added test cases in
+ // invariant-store-vectorization.ll).
if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
NumPredStores++;
- if (isa<LoadInst>(&I) && Legal->isUniform(Ptr) &&
- // Conditional loads should be scalarized and predicated.
+ if (Legal->isUniform(Ptr) &&
+ // Conditional loads and stores should be scalarized and predicated.
// isScalarWithPredication cannot be used here since masked
// gather/scatters are not considered scalar with predication.
!Legal->blockNeedsPredication(I.getParent())) {
- // Scalar load + broadcast
+ // TODO: Avoid replicating loads and stores instead of
+ // relying on instcombine to remove them.
+ // Load: Scalar load + broadcast
+ // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
unsigned Cost = getUniformMemOpCost(&I, VF);
setWideningDecision(&I, VF, CM_Scalarize, Cost);
continue;
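Per the TODO above, a uniform store under predication does not take this path
and is still costed like any other predicated store. A hypothetical example
of such a conditional uniform store:

    // Illustrative only: the store targets a uniform address but sits in a
    // predicated block, so blockNeedsPredication() is true and the
    // scalarize-with-uniform-cost decision above does not apply.
    void q(int *p, const int *a, int n) {
      for (int i = 0; i < n; ++i)
        if (a[i] > 0)
          *p = 1; // predicated store to a uniform address
    }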